1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21da177e4SLinus Torvalds /* 3c54fce6eSTejun Heo * kernel/workqueue.c - generic async execution with shared worker pool 41da177e4SLinus Torvalds * 5c54fce6eSTejun Heo * Copyright (C) 2002 Ingo Molnar 61da177e4SLinus Torvalds * 71da177e4SLinus Torvalds * Derived from the taskqueue/keventd code by: 81da177e4SLinus Torvalds * David Woodhouse <dwmw2@infradead.org> 9e1f8e874SFrancois Cami * Andrew Morton 101da177e4SLinus Torvalds * Kai Petzke <wpp@marie.physik.tu-berlin.de> 111da177e4SLinus Torvalds * Theodore Ts'o <tytso@mit.edu> 1289ada679SChristoph Lameter * 13cde53535SChristoph Lameter * Made to use alloc_percpu by Christoph Lameter. 14c54fce6eSTejun Heo * 15c54fce6eSTejun Heo * Copyright (C) 2010 SUSE Linux Products GmbH 16c54fce6eSTejun Heo * Copyright (C) 2010 Tejun Heo <tj@kernel.org> 17c54fce6eSTejun Heo * 18c54fce6eSTejun Heo * This is the generic async execution mechanism. Work items are 19c54fce6eSTejun Heo * executed in process context. The worker pool is shared and 20b11895c4SLibin * automatically managed. There are two worker pools for each CPU (one for 21b11895c4SLibin * normal work items and the other for high priority ones) and some extra 22b11895c4SLibin * pools for workqueues which are not bound to any specific CPU - the 23b11895c4SLibin * number of these backing pools is dynamic. 24c54fce6eSTejun Heo * 259a261491SBenjamin Peterson * Please read Documentation/core-api/workqueue.rst for details. 261da177e4SLinus Torvalds */ 271da177e4SLinus Torvalds 289984de1aSPaul Gortmaker #include <linux/export.h> 291da177e4SLinus Torvalds #include <linux/kernel.h> 301da177e4SLinus Torvalds #include <linux/sched.h> 311da177e4SLinus Torvalds #include <linux/init.h> 321da177e4SLinus Torvalds #include <linux/signal.h> 331da177e4SLinus Torvalds #include <linux/completion.h> 341da177e4SLinus Torvalds #include <linux/workqueue.h> 351da177e4SLinus Torvalds #include <linux/slab.h> 361da177e4SLinus Torvalds #include <linux/cpu.h> 371da177e4SLinus Torvalds #include <linux/notifier.h> 381da177e4SLinus Torvalds #include <linux/kthread.h> 391fa44ecaSJames Bottomley #include <linux/hardirq.h> 4046934023SChristoph Lameter #include <linux/mempolicy.h> 41341a5958SRafael J. Wysocki #include <linux/freezer.h> 42d5abe669SPeter Zijlstra #include <linux/debug_locks.h> 434e6045f1SJohannes Berg #include <linux/lockdep.h> 44c34056a3STejun Heo #include <linux/idr.h> 4529c91e99STejun Heo #include <linux/jhash.h> 4642f8570fSSasha Levin #include <linux/hashtable.h> 4776af4d93STejun Heo #include <linux/rculist.h> 48bce90380STejun Heo #include <linux/nodemask.h> 494c16bd32STejun Heo #include <linux/moduleparam.h> 503d1cb205STejun Heo #include <linux/uaccess.h> 51c98a9805STal Shorer #include <linux/sched/isolation.h> 52cd2440d6SPetr Mladek #include <linux/sched/debug.h> 5362635ea8SSergey Senozhatsky #include <linux/nmi.h> 54940d71c6SSergey Senozhatsky #include <linux/kvm_para.h> 55aa6fde93STejun Heo #include <linux/delay.h> 56e22bee78STejun Heo 57ea138446STejun Heo #include "workqueue_internal.h" 581da177e4SLinus Torvalds 59c8e55f36STejun Heo enum { 60bc2ae0f5STejun Heo /* 6124647570STejun Heo * worker_pool flags 62bc2ae0f5STejun Heo * 6324647570STejun Heo * A bound pool is either associated or disassociated with its CPU. 64bc2ae0f5STejun Heo * While associated (!DISASSOCIATED), all workers are bound to the 65bc2ae0f5STejun Heo * CPU and none has %WORKER_UNBOUND set and concurrency management 66bc2ae0f5STejun Heo * is in effect.
67bc2ae0f5STejun Heo * 68bc2ae0f5STejun Heo * While DISASSOCIATED, the cpu may be offline and all workers have 69bc2ae0f5STejun Heo * %WORKER_UNBOUND set and concurrency management disabled, and may 7024647570STejun Heo * be executing on any CPU. The pool behaves as an unbound one. 71bc2ae0f5STejun Heo * 72bc3a1afcSTejun Heo * Note that DISASSOCIATED should be flipped only while holding 731258fae7STejun Heo * wq_pool_attach_mutex to avoid changing binding state while 744736cbf7SLai Jiangshan * worker_attach_to_pool() is in progress. 75bc2ae0f5STejun Heo */ 76692b4825STejun Heo POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */ 7724647570STejun Heo POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ 78db7bccf4STejun Heo 79c8e55f36STejun Heo /* worker flags */ 80c8e55f36STejun Heo WORKER_DIE = 1 << 1, /* die die die */ 81c8e55f36STejun Heo WORKER_IDLE = 1 << 2, /* is idle */ 82e22bee78STejun Heo WORKER_PREP = 1 << 3, /* preparing to run works */ 83fb0e7bebSTejun Heo WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ 84f3421797STejun Heo WORKER_UNBOUND = 1 << 7, /* worker is unbound */ 85a9ab775bSTejun Heo WORKER_REBOUND = 1 << 8, /* worker was rebound */ 86e22bee78STejun Heo 87a9ab775bSTejun Heo WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | 88a9ab775bSTejun Heo WORKER_UNBOUND | WORKER_REBOUND, 89db7bccf4STejun Heo 90e34cdddbSTejun Heo NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ 914ce62e9eSTejun Heo 9229c91e99STejun Heo UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */ 93c8e55f36STejun Heo BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ 94db7bccf4STejun Heo 95e22bee78STejun Heo MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ 96e22bee78STejun Heo IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ 97e22bee78STejun Heo 983233cdbdSTejun Heo MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, 993233cdbdSTejun Heo /* call for help after 10ms 1003233cdbdSTejun Heo (min two ticks) */ 101e22bee78STejun Heo MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ 102e22bee78STejun Heo CREATE_COOLDOWN = HZ, /* time to breathe after fail */ 1031da177e4SLinus Torvalds 1041da177e4SLinus Torvalds /* 105e22bee78STejun Heo * Rescue workers are used only in emergencies and shared by 1068698a745SDongsheng Yang * all cpus. Give MIN_NICE. 107e22bee78STejun Heo */ 1088698a745SDongsheng Yang RESCUER_NICE_LEVEL = MIN_NICE, 1098698a745SDongsheng Yang HIGHPRI_NICE_LEVEL = MIN_NICE, 110ecf6881fSTejun Heo 11143a181f8SAudra Mitchell WQ_NAME_LEN = 32, 112c8e55f36STejun Heo }; 113c8e55f36STejun Heo 1141da177e4SLinus Torvalds /* 1154690c4abSTejun Heo * Structure fields follow one of the following exclusion rules. 1164690c4abSTejun Heo * 117e41e704bSTejun Heo * I: Modifiable by initialization/destruction paths and read-only for 118e41e704bSTejun Heo * everyone else. 1194690c4abSTejun Heo * 120e22bee78STejun Heo * P: Preemption protected. Disabling preemption is enough and the field 121e22bee78STejun Heo * should only be modified and accessed from the local cpu. 122e22bee78STejun Heo * 123d565ed63STejun Heo * L: pool->lock protected. Access with pool->lock held. 1244690c4abSTejun Heo * 125bdf8b9bfSTejun Heo * K: Only modified by worker while holding pool->lock. Can be safely read by 126bdf8b9bfSTejun Heo * self, while holding pool->lock or from IRQ context if %current is the 127bdf8b9bfSTejun Heo * kworker. 128bdf8b9bfSTejun Heo * 129bdf8b9bfSTejun Heo * S: Only modified by worker self. 130bdf8b9bfSTejun Heo * 1311258fae7STejun Heo * A: wq_pool_attach_mutex protected.
132822d8405STejun Heo * 13368e13a67SLai Jiangshan * PL: wq_pool_mutex protected. 13476af4d93STejun Heo * 13524acfb71SThomas Gleixner * PR: wq_pool_mutex protected for writes. RCU protected for reads. 1365bcab335STejun Heo * 1375b95e1afSLai Jiangshan * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. 1385b95e1afSLai Jiangshan * 1395b95e1afSLai Jiangshan * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or 14024acfb71SThomas Gleixner * RCU for reads. 1415b95e1afSLai Jiangshan * 1423c25a55dSLai Jiangshan * WQ: wq->mutex protected. 1433c25a55dSLai Jiangshan * 14424acfb71SThomas Gleixner * WR: wq->mutex protected for writes. RCU protected for reads. 1452e109a28STejun Heo * 14682e098f5STejun Heo * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read 14782e098f5STejun Heo * with READ_ONCE() without locking. 14882e098f5STejun Heo * 1492e109a28STejun Heo * MD: wq_mayday_lock protected. 150cd2440d6SPetr Mladek * 151cd2440d6SPetr Mladek * WD: Used internally by the watchdog. 1524690c4abSTejun Heo */ 1534690c4abSTejun Heo 1542eaebdb3STejun Heo /* struct worker is defined in workqueue_internal.h */ 155c34056a3STejun Heo 156bd7bdd43STejun Heo struct worker_pool { 157a9b8a985SSebastian Andrzej Siewior raw_spinlock_t lock; /* the pool lock */ 158d84ff051STejun Heo int cpu; /* I: the associated cpu */ 159f3f90ad4STejun Heo int node; /* I: the associated node ID */ 1609daf9e67STejun Heo int id; /* I: pool ID */ 161bc8b50c2STejun Heo unsigned int flags; /* L: flags */ 162bd7bdd43STejun Heo 16382607adcSTejun Heo unsigned long watchdog_ts; /* L: watchdog timestamp */ 164cd2440d6SPetr Mladek bool cpu_stall; /* WD: stalled cpu bound pool */ 16582607adcSTejun Heo 166bc35f7efSLai Jiangshan /* 167bc35f7efSLai Jiangshan * The counter is incremented in a process context on the associated CPU 168bc35f7efSLai Jiangshan * w/ preemption disabled, and decremented or reset in the same context 169bc35f7efSLai Jiangshan * but w/ pool->lock held. The readers grab pool->lock and are 170bc35f7efSLai Jiangshan * guaranteed to see if the counter reached zero. 
171bc35f7efSLai Jiangshan */ 172bc35f7efSLai Jiangshan int nr_running; 17384f91c62SLai Jiangshan 174bd7bdd43STejun Heo struct list_head worklist; /* L: list of pending works */ 175ea1abd61SLai Jiangshan 1765826cc8fSLai Jiangshan int nr_workers; /* L: total number of workers */ 1775826cc8fSLai Jiangshan int nr_idle; /* L: currently idle workers */ 178bd7bdd43STejun Heo 1792c1f1a91SLai Jiangshan struct list_head idle_list; /* L: list of idle workers */ 180bd7bdd43STejun Heo struct timer_list idle_timer; /* L: worker idle timeout */ 1813f959aa3SValentin Schneider struct work_struct idle_cull_work; /* L: worker idle cleanup */ 1823f959aa3SValentin Schneider 183bd7bdd43STejun Heo struct timer_list mayday_timer; /* L: SOS timer for workers */ 184bd7bdd43STejun Heo 185c5aa87bbSTejun Heo /* a worker is either on busy_hash or idle_list, or the manager */ 186c9e7cf27STejun Heo DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER); 187c9e7cf27STejun Heo /* L: hash of busy workers */ 188c9e7cf27STejun Heo 1892607d7a6STejun Heo struct worker *manager; /* L: purely informational */ 19092f9c5c4SLai Jiangshan struct list_head workers; /* A: attached workers */ 191e02b9312SValentin Schneider struct list_head dying_workers; /* A: workers about to die */ 19260f5a4bcSLai Jiangshan struct completion *detach_completion; /* all workers detached */ 193e19e397aSTejun Heo 1947cda9aaeSLai Jiangshan struct ida worker_ida; /* worker IDs for task name */ 195e19e397aSTejun Heo 1967a4e344cSTejun Heo struct workqueue_attrs *attrs; /* I: worker attributes */ 19768e13a67SLai Jiangshan struct hlist_node hash_node; /* PL: unbound_pool_hash node */ 19868e13a67SLai Jiangshan int refcnt; /* PL: refcnt for unbound pools */ 1997a4e344cSTejun Heo 200e19e397aSTejun Heo /* 20124acfb71SThomas Gleixner * Destruction of pool is RCU protected to allow dereferences 20229c91e99STejun Heo * from get_work_pool(). 20329c91e99STejun Heo */ 20429c91e99STejun Heo struct rcu_head rcu; 20584f91c62SLai Jiangshan }; 2068b03ae3cSTejun Heo 2078b03ae3cSTejun Heo /* 208725e8ec5STejun Heo * Per-pool_workqueue statistics. These can be monitored using 209725e8ec5STejun Heo * tools/workqueue/wq_monitor.py. 210725e8ec5STejun Heo */ 211725e8ec5STejun Heo enum pool_workqueue_stats { 212725e8ec5STejun Heo PWQ_STAT_STARTED, /* work items started execution */ 213725e8ec5STejun Heo PWQ_STAT_COMPLETED, /* work items completed execution */ 2148a1dd1e5STejun Heo PWQ_STAT_CPU_TIME, /* total CPU time consumed */ 215616db877STejun Heo PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */ 216725e8ec5STejun Heo PWQ_STAT_CM_WAKEUP, /* concurrency-management worker wakeups */ 2178639ecebSTejun Heo PWQ_STAT_REPATRIATED, /* unbound workers brought back into scope */ 218725e8ec5STejun Heo PWQ_STAT_MAYDAY, /* maydays to rescuer */ 219725e8ec5STejun Heo PWQ_STAT_RESCUED, /* linked work items executed by rescuer */ 220725e8ec5STejun Heo 221725e8ec5STejun Heo PWQ_NR_STATS, 222725e8ec5STejun Heo }; 223725e8ec5STejun Heo 224725e8ec5STejun Heo /* 225112202d9STejun Heo * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS 226112202d9STejun Heo * of work_struct->data are used for flags and the remaining high bits 227112202d9STejun Heo * point to the pwq; thus, pwqs need to be aligned at two's power of the 228112202d9STejun Heo * number of flag bits.
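 *
 * As a rough sketch of the packing described above (using the flag and
 * mask names that appear later in this file; simplified, not the exact
 * kernel code):
 *
 *	data = (unsigned long)pwq | WORK_STRUCT_PWQ | WORK_STRUCT_PENDING;
 *	pwq  = (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *
 * The __aligned(1 << WORK_STRUCT_FLAG_BITS) attribute on the struct below
 * is what guarantees that the flag bits of the stored pointer are zero.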
2291da177e4SLinus Torvalds */ 230112202d9STejun Heo struct pool_workqueue { 231bd7bdd43STejun Heo struct worker_pool *pool; /* I: the associated pool */ 2324690c4abSTejun Heo struct workqueue_struct *wq; /* I: the owning workqueue */ 23373f53c4aSTejun Heo int work_color; /* L: current color */ 23473f53c4aSTejun Heo int flush_color; /* L: flushing color */ 2358864b4e5STejun Heo int refcnt; /* L: reference count */ 23673f53c4aSTejun Heo int nr_in_flight[WORK_NR_COLORS]; 23773f53c4aSTejun Heo /* L: nr of in_flight works */ 238018f3a13SLai Jiangshan 239018f3a13SLai Jiangshan /* 240018f3a13SLai Jiangshan * nr_active management and WORK_STRUCT_INACTIVE: 241018f3a13SLai Jiangshan * 242018f3a13SLai Jiangshan * When pwq->nr_active >= max_active, a new work item is queued to 243018f3a13SLai Jiangshan * pwq->inactive_works instead of pool->worklist and marked with 244018f3a13SLai Jiangshan * WORK_STRUCT_INACTIVE. 245018f3a13SLai Jiangshan * 2466741dd3fSGreg Kroah-Hartman * All work items marked with WORK_STRUCT_INACTIVE do not participate 2476741dd3fSGreg Kroah-Hartman * in pwq->nr_active and all work items in pwq->inactive_works are 2486741dd3fSGreg Kroah-Hartman * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE 2496741dd3fSGreg Kroah-Hartman * work items are in pwq->inactive_works. Some of them are ready to 2506741dd3fSGreg Kroah-Hartman * run in pool->worklist or worker->scheduled. Those work items are 2516741dd3fSGreg Kroah-Hartman * only struct wq_barrier which is used for flush_work() and should 2526741dd3fSGreg Kroah-Hartman * not participate in pwq->nr_active. For a non-barrier work item, it 2536741dd3fSGreg Kroah-Hartman * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works. 254018f3a13SLai Jiangshan */ 2551e19ffc6STejun Heo int nr_active; /* L: nr of active works */ 256f97a4a1aSLai Jiangshan struct list_head inactive_works; /* L: inactive works */ 2573c25a55dSLai Jiangshan struct list_head pwqs_node; /* WR: node on wq->pwqs */ 2582e109a28STejun Heo struct list_head mayday_node; /* MD: node on wq->maydays */ 2598864b4e5STejun Heo 260725e8ec5STejun Heo u64 stats[PWQ_NR_STATS]; 261725e8ec5STejun Heo 2628864b4e5STejun Heo /* 263967b494eSTejun Heo * Release of unbound pwq is punted to a kthread_worker. See put_pwq() 264687a9aa5STejun Heo * and pwq_release_workfn() for details. pool_workqueue itself is also 265687a9aa5STejun Heo * RCU protected so that the first pwq can be determined without 266967b494eSTejun Heo * grabbing wq->mutex. 2678864b4e5STejun Heo */ 268687a9aa5STejun Heo struct kthread_work release_work; 2698864b4e5STejun Heo struct rcu_head rcu; 270e904e6c2STejun Heo } __aligned(1 << WORK_STRUCT_FLAG_BITS); 2711da177e4SLinus Torvalds 2721da177e4SLinus Torvalds /* 27373f53c4aSTejun Heo * Structure used to wait for workqueue flush. 27473f53c4aSTejun Heo */ 27573f53c4aSTejun Heo struct wq_flusher { 2763c25a55dSLai Jiangshan struct list_head list; /* WQ: list of flushers */ 2773c25a55dSLai Jiangshan int flush_color; /* WQ: flush color waiting for */ 27873f53c4aSTejun Heo struct completion done; /* flush completion */ 27973f53c4aSTejun Heo }; 2801da177e4SLinus Torvalds 281226223abSTejun Heo struct wq_device; 282226223abSTejun Heo 28373f53c4aSTejun Heo /* 284c5aa87bbSTejun Heo * The externally visible workqueue. It relays the issued work items to 285c5aa87bbSTejun Heo * the appropriate worker_pool through its pool_workqueues.
2861da177e4SLinus Torvalds */ 2871da177e4SLinus Torvalds struct workqueue_struct { 2883c25a55dSLai Jiangshan struct list_head pwqs; /* WR: all pwqs of this wq */ 289e2dca7adSTejun Heo struct list_head list; /* PR: list of all workqueues */ 29073f53c4aSTejun Heo 2913c25a55dSLai Jiangshan struct mutex mutex; /* protects this wq */ 2923c25a55dSLai Jiangshan int work_color; /* WQ: current work color */ 2933c25a55dSLai Jiangshan int flush_color; /* WQ: current flush color */ 294112202d9STejun Heo atomic_t nr_pwqs_to_flush; /* flush in progress */ 2953c25a55dSLai Jiangshan struct wq_flusher *first_flusher; /* WQ: first flusher */ 2963c25a55dSLai Jiangshan struct list_head flusher_queue; /* WQ: flush waiters */ 2973c25a55dSLai Jiangshan struct list_head flusher_overflow; /* WQ: flush overflow list */ 29873f53c4aSTejun Heo 2992e109a28STejun Heo struct list_head maydays; /* MD: pwqs requesting rescue */ 30030ae2fc0STejun Heo struct worker *rescuer; /* MD: rescue worker */ 301e22bee78STejun Heo 30287fc741eSLai Jiangshan int nr_drainers; /* WQ: drain in progress */ 30382e098f5STejun Heo int max_active; /* WO: max active works */ 30482e098f5STejun Heo int saved_max_active; /* WQ: saved max_active */ 305226223abSTejun Heo 3065b95e1afSLai Jiangshan struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ 307bd31fb92STejun Heo struct pool_workqueue __rcu *dfl_pwq; /* PW: only for unbound wqs */ 3086029a918STejun Heo 309226223abSTejun Heo #ifdef CONFIG_SYSFS 310226223abSTejun Heo struct wq_device *wq_dev; /* I: for sysfs interface */ 311226223abSTejun Heo #endif 3124e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP 313669de8bdSBart Van Assche char *lock_name; 314669de8bdSBart Van Assche struct lock_class_key key; 3154e6045f1SJohannes Berg struct lockdep_map lockdep_map; 3164e6045f1SJohannes Berg #endif 317ecf6881fSTejun Heo char name[WQ_NAME_LEN]; /* I: workqueue name */ 3182728fd2fSTejun Heo 319e2dca7adSTejun Heo /* 32024acfb71SThomas Gleixner * Destruction of workqueue_struct is RCU protected to allow walking 32124acfb71SThomas Gleixner * the workqueues list without grabbing wq_pool_mutex. 322e2dca7adSTejun Heo * This is used to dump all workqueues from sysrq. 323e2dca7adSTejun Heo */ 324e2dca7adSTejun Heo struct rcu_head rcu; 325e2dca7adSTejun Heo 3262728fd2fSTejun Heo /* hot fields used during command issue, aligned to cacheline */ 3272728fd2fSTejun Heo unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ 328636b927eSTejun Heo struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ 3291da177e4SLinus Torvalds }; 3301da177e4SLinus Torvalds 331e904e6c2STejun Heo static struct kmem_cache *pwq_cache; 332e904e6c2STejun Heo 33384193c07STejun Heo /* 33484193c07STejun Heo * Each pod type describes how CPUs should be grouped for unbound workqueues. 33584193c07STejun Heo * See the comment above workqueue_attrs->affn_scope. 
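 *
 * For example (illustrative topology, not a requirement): with
 * WQ_AFFN_CACHE on a two-socket machine that has two last-level caches
 * per socket, nr_pods would be 4, each pod_cpus[] entry would hold the
 * CPUs sharing one LLC, pod_node[] the NUMA node backing that LLC, and
 * cpu_pod[] would map every CPU back to its LLC pod.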
33684193c07STejun Heo */ 33784193c07STejun Heo struct wq_pod_type { 33884193c07STejun Heo int nr_pods; /* number of pods */ 33984193c07STejun Heo cpumask_var_t *pod_cpus; /* pod -> cpus */ 34084193c07STejun Heo int *pod_node; /* pod -> node */ 34184193c07STejun Heo int *cpu_pod; /* cpu -> pod */ 34284193c07STejun Heo }; 34384193c07STejun Heo 34484193c07STejun Heo static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES]; 345523a301eSTejun Heo static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE; 34663c5484eSTejun Heo 34763c5484eSTejun Heo static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { 348523a301eSTejun Heo [WQ_AFFN_DFL] = "default", 34963c5484eSTejun Heo [WQ_AFFN_CPU] = "cpu", 35063c5484eSTejun Heo [WQ_AFFN_SMT] = "smt", 35163c5484eSTejun Heo [WQ_AFFN_CACHE] = "cache", 35263c5484eSTejun Heo [WQ_AFFN_NUMA] = "numa", 35363c5484eSTejun Heo [WQ_AFFN_SYSTEM] = "system", 35463c5484eSTejun Heo }; 355bce90380STejun Heo 356616db877STejun Heo /* 357616db877STejun Heo * Per-cpu work items which run for longer than the following threshold are 358616db877STejun Heo * automatically considered CPU intensive and excluded from concurrency 359616db877STejun Heo * management to prevent them from noticeably delaying other per-cpu work items. 360aa6fde93STejun Heo * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter. 361aa6fde93STejun Heo * The actual value is initialized in wq_cpu_intensive_thresh_init(). 362616db877STejun Heo */ 363aa6fde93STejun Heo static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX; 364616db877STejun Heo module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644); 365616db877STejun Heo 366cee22a15SViresh Kumar /* see the comment above the definition of WQ_POWER_EFFICIENT */ 367552f530cSLuis R. Rodriguez static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT); 368cee22a15SViresh Kumar module_param_named(power_efficient, wq_power_efficient, bool, 0444); 369cee22a15SViresh Kumar 370863b710bSTejun Heo static bool wq_online; /* can kworkers be created yet? */ 3713347fa09STejun Heo 372fef59c9cSTejun Heo /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */ 373fef59c9cSTejun Heo static struct workqueue_attrs *wq_update_pod_attrs_buf; 3744c16bd32STejun Heo 37568e13a67SLai Jiangshan static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ 3761258fae7STejun Heo static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */ 377a9b8a985SSebastian Andrzej Siewior static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ 378d8bb65abSSebastian Andrzej Siewior /* wait for manager to go away */ 379d8bb65abSSebastian Andrzej Siewior static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait); 3805bcab335STejun Heo 381e2dca7adSTejun Heo static LIST_HEAD(workqueues); /* PR: list of all workqueues */ 38268e13a67SLai Jiangshan static bool workqueue_freezing; /* PL: have wqs started freezing? 
*/ 3837d19c5ceSTejun Heo 38499c621efSLai Jiangshan /* PL&A: allowable cpus for unbound wqs and work items */ 385ef557180SMike Galbraith static cpumask_var_t wq_unbound_cpumask; 386ef557180SMike Galbraith 387ace3c549Stiozhang /* for further constrain wq_unbound_cpumask by cmdline parameter*/ 388ace3c549Stiozhang static struct cpumask wq_cmdline_cpumask __initdata; 389ace3c549Stiozhang 390ef557180SMike Galbraith /* CPU where unbound work was last round robin scheduled from this CPU */ 391ef557180SMike Galbraith static DEFINE_PER_CPU(int, wq_rr_cpu_last); 392b05a7928SFrederic Weisbecker 393f303fccbSTejun Heo /* 394f303fccbSTejun Heo * Local execution of unbound work items is no longer guaranteed. The 395f303fccbSTejun Heo * following always forces round-robin CPU selection on unbound work items 396f303fccbSTejun Heo * to uncover usages which depend on it. 397f303fccbSTejun Heo */ 398f303fccbSTejun Heo #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU 399f303fccbSTejun Heo static bool wq_debug_force_rr_cpu = true; 400f303fccbSTejun Heo #else 401f303fccbSTejun Heo static bool wq_debug_force_rr_cpu = false; 402f303fccbSTejun Heo #endif 403f303fccbSTejun Heo module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644); 404f303fccbSTejun Heo 4057d19c5ceSTejun Heo /* the per-cpu worker pools */ 40625528213SPeter Zijlstra static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools); 4077d19c5ceSTejun Heo 40868e13a67SLai Jiangshan static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */ 4097d19c5ceSTejun Heo 41068e13a67SLai Jiangshan /* PL: hash of all unbound pools keyed by pool->attrs */ 41129c91e99STejun Heo static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); 41229c91e99STejun Heo 413c5aa87bbSTejun Heo /* I: attributes used when instantiating standard unbound pools on demand */ 41429c91e99STejun Heo static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; 41529c91e99STejun Heo 4168a2b7538STejun Heo /* I: attributes used when instantiating ordered pools on demand */ 4178a2b7538STejun Heo static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS]; 4188a2b7538STejun Heo 419967b494eSTejun Heo /* 420967b494eSTejun Heo * I: kthread_worker to release pwq's. pwq release needs to be bounced to a 421967b494eSTejun Heo * process context while holding a pool lock. Bounce to a dedicated kthread 422967b494eSTejun Heo * worker to avoid A-A deadlocks. 
423967b494eSTejun Heo */ 424967b494eSTejun Heo static struct kthread_worker *pwq_release_worker; 425967b494eSTejun Heo 426d320c038STejun Heo struct workqueue_struct *system_wq __read_mostly; 427ad7b1f84SMarc Dionne EXPORT_SYMBOL(system_wq); 428044c782cSValentin Ilie struct workqueue_struct *system_highpri_wq __read_mostly; 4291aabe902SJoonsoo Kim EXPORT_SYMBOL_GPL(system_highpri_wq); 430044c782cSValentin Ilie struct workqueue_struct *system_long_wq __read_mostly; 431d320c038STejun Heo EXPORT_SYMBOL_GPL(system_long_wq); 432044c782cSValentin Ilie struct workqueue_struct *system_unbound_wq __read_mostly; 433f3421797STejun Heo EXPORT_SYMBOL_GPL(system_unbound_wq); 434044c782cSValentin Ilie struct workqueue_struct *system_freezable_wq __read_mostly; 43524d51addSTejun Heo EXPORT_SYMBOL_GPL(system_freezable_wq); 4360668106cSViresh Kumar struct workqueue_struct *system_power_efficient_wq __read_mostly; 4370668106cSViresh Kumar EXPORT_SYMBOL_GPL(system_power_efficient_wq); 4380668106cSViresh Kumar struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; 4390668106cSViresh Kumar EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); 440d320c038STejun Heo 4417d19c5ceSTejun Heo static int worker_thread(void *__worker); 4426ba94429SFrederic Weisbecker static void workqueue_sysfs_unregister(struct workqueue_struct *wq); 443c29eb853STejun Heo static void show_pwq(struct pool_workqueue *pwq); 44455df0933SImran Khan static void show_one_worker_pool(struct worker_pool *pool); 4457d19c5ceSTejun Heo 44697bd2347STejun Heo #define CREATE_TRACE_POINTS 44797bd2347STejun Heo #include <trace/events/workqueue.h> 44897bd2347STejun Heo 44968e13a67SLai Jiangshan #define assert_rcu_or_pool_mutex() \ 45024acfb71SThomas Gleixner RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 451f78f5b90SPaul E. McKenney !lockdep_is_held(&wq_pool_mutex), \ 45224acfb71SThomas Gleixner "RCU or wq_pool_mutex should be held") 4535bcab335STejun Heo 4545b95e1afSLai Jiangshan #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ 45524acfb71SThomas Gleixner RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 456f78f5b90SPaul E. McKenney !lockdep_is_held(&wq->mutex) && \ 457f78f5b90SPaul E. McKenney !lockdep_is_held(&wq_pool_mutex), \ 45824acfb71SThomas Gleixner "RCU, wq->mutex or wq_pool_mutex should be held") 4595b95e1afSLai Jiangshan 460f02ae73aSTejun Heo #define for_each_cpu_worker_pool(pool, cpu) \ 461f02ae73aSTejun Heo for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ 462f02ae73aSTejun Heo (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ 4637a62c2c8STejun Heo (pool)++) 4644ce62e9eSTejun Heo 46549e3cf44STejun Heo /** 46617116969STejun Heo * for_each_pool - iterate through all worker_pools in the system 46717116969STejun Heo * @pool: iteration cursor 468611c92a0STejun Heo * @pi: integer used for iteration 469fa1b54e6STejun Heo * 47024acfb71SThomas Gleixner * This must be called either with wq_pool_mutex held or RCU read 47168e13a67SLai Jiangshan * locked. If the pool needs to be used beyond the locking in effect, the 47268e13a67SLai Jiangshan * caller is responsible for guaranteeing that the pool stays online. 473fa1b54e6STejun Heo * 474fa1b54e6STejun Heo * The if/else clause exists only for the lockdep assertion and can be 475fa1b54e6STejun Heo * ignored. 
47617116969STejun Heo */ 477611c92a0STejun Heo #define for_each_pool(pool, pi) \ 478611c92a0STejun Heo idr_for_each_entry(&worker_pool_idr, pool, pi) \ 47968e13a67SLai Jiangshan if (({ assert_rcu_or_pool_mutex(); false; })) { } \ 480fa1b54e6STejun Heo else 48117116969STejun Heo 48217116969STejun Heo /** 483822d8405STejun Heo * for_each_pool_worker - iterate through all workers of a worker_pool 484822d8405STejun Heo * @worker: iteration cursor 485822d8405STejun Heo * @pool: worker_pool to iterate workers of 486822d8405STejun Heo * 4871258fae7STejun Heo * This must be called with wq_pool_attach_mutex. 488822d8405STejun Heo * 489822d8405STejun Heo * The if/else clause exists only for the lockdep assertion and can be 490822d8405STejun Heo * ignored. 491822d8405STejun Heo */ 492da028469SLai Jiangshan #define for_each_pool_worker(worker, pool) \ 493da028469SLai Jiangshan list_for_each_entry((worker), &(pool)->workers, node) \ 4941258fae7STejun Heo if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \ 495822d8405STejun Heo else 496822d8405STejun Heo 497822d8405STejun Heo /** 49849e3cf44STejun Heo * for_each_pwq - iterate through all pool_workqueues of the specified workqueue 49949e3cf44STejun Heo * @pwq: iteration cursor 50049e3cf44STejun Heo * @wq: the target workqueue 50176af4d93STejun Heo * 50224acfb71SThomas Gleixner * This must be called either with wq->mutex held or RCU read locked. 503794b18bcSTejun Heo * If the pwq needs to be used beyond the locking in effect, the caller is 504794b18bcSTejun Heo * responsible for guaranteeing that the pwq stays online. 50576af4d93STejun Heo * 50676af4d93STejun Heo * The if/else clause exists only for the lockdep assertion and can be 50776af4d93STejun Heo * ignored. 50849e3cf44STejun Heo */ 50949e3cf44STejun Heo #define for_each_pwq(pwq, wq) \ 51049e9d1a9SSebastian Andrzej Siewior list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \ 5115a644662SJoel Fernandes (Google) lockdep_is_held(&(wq->mutex))) 512f3421797STejun Heo 513dc186ad7SThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_WORK 514dc186ad7SThomas Gleixner 515f9e62f31SStephen Boyd static const struct debug_obj_descr work_debug_descr; 516dc186ad7SThomas Gleixner 51799777288SStanislaw Gruszka static void *work_debug_hint(void *addr) 51899777288SStanislaw Gruszka { 51999777288SStanislaw Gruszka return ((struct work_struct *) addr)->func; 52099777288SStanislaw Gruszka } 52199777288SStanislaw Gruszka 522b9fdac7fSDu, Changbin static bool work_is_static_object(void *addr) 523b9fdac7fSDu, Changbin { 524b9fdac7fSDu, Changbin struct work_struct *work = addr; 525b9fdac7fSDu, Changbin 526b9fdac7fSDu, Changbin return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); 527b9fdac7fSDu, Changbin } 528b9fdac7fSDu, Changbin 529dc186ad7SThomas Gleixner /* 530dc186ad7SThomas Gleixner * fixup_init is called when: 531dc186ad7SThomas Gleixner * - an active object is initialized 532dc186ad7SThomas Gleixner */ 53302a982a6SDu, Changbin static bool work_fixup_init(void *addr, enum debug_obj_state state) 534dc186ad7SThomas Gleixner { 535dc186ad7SThomas Gleixner struct work_struct *work = addr; 536dc186ad7SThomas Gleixner 537dc186ad7SThomas Gleixner switch (state) { 538dc186ad7SThomas Gleixner case ODEBUG_STATE_ACTIVE: 539dc186ad7SThomas Gleixner cancel_work_sync(work); 540dc186ad7SThomas Gleixner debug_object_init(work, &work_debug_descr); 54102a982a6SDu, Changbin return true; 542dc186ad7SThomas Gleixner default: 54302a982a6SDu, Changbin return false; 544dc186ad7SThomas Gleixner } 545dc186ad7SThomas Gleixner } 
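/*
 * Illustrative sequence that would end up in the fixup above (assuming
 * CONFIG_DEBUG_OBJECTS_WORK): re-initializing a work item that is still
 * pending is caught as an active object and cancelled before the object
 * state is reset.
 *
 *	INIT_WORK(&w, fn);
 *	queue_work(system_wq, &w);
 *	INIT_WORK(&w, fn);	// active object, triggers work_fixup_init()
 */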
546dc186ad7SThomas Gleixner 547dc186ad7SThomas Gleixner /* 548dc186ad7SThomas Gleixner * fixup_free is called when: 549dc186ad7SThomas Gleixner * - an active object is freed 550dc186ad7SThomas Gleixner */ 55102a982a6SDu, Changbin static bool work_fixup_free(void *addr, enum debug_obj_state state) 552dc186ad7SThomas Gleixner { 553dc186ad7SThomas Gleixner struct work_struct *work = addr; 554dc186ad7SThomas Gleixner 555dc186ad7SThomas Gleixner switch (state) { 556dc186ad7SThomas Gleixner case ODEBUG_STATE_ACTIVE: 557dc186ad7SThomas Gleixner cancel_work_sync(work); 558dc186ad7SThomas Gleixner debug_object_free(work, &work_debug_descr); 55902a982a6SDu, Changbin return true; 560dc186ad7SThomas Gleixner default: 56102a982a6SDu, Changbin return false; 562dc186ad7SThomas Gleixner } 563dc186ad7SThomas Gleixner } 564dc186ad7SThomas Gleixner 565f9e62f31SStephen Boyd static const struct debug_obj_descr work_debug_descr = { 566dc186ad7SThomas Gleixner .name = "work_struct", 56799777288SStanislaw Gruszka .debug_hint = work_debug_hint, 568b9fdac7fSDu, Changbin .is_static_object = work_is_static_object, 569dc186ad7SThomas Gleixner .fixup_init = work_fixup_init, 570dc186ad7SThomas Gleixner .fixup_free = work_fixup_free, 571dc186ad7SThomas Gleixner }; 572dc186ad7SThomas Gleixner 573dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) 574dc186ad7SThomas Gleixner { 575dc186ad7SThomas Gleixner debug_object_activate(work, &work_debug_descr); 576dc186ad7SThomas Gleixner } 577dc186ad7SThomas Gleixner 578dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) 579dc186ad7SThomas Gleixner { 580dc186ad7SThomas Gleixner debug_object_deactivate(work, &work_debug_descr); 581dc186ad7SThomas Gleixner } 582dc186ad7SThomas Gleixner 583dc186ad7SThomas Gleixner void __init_work(struct work_struct *work, int onstack) 584dc186ad7SThomas Gleixner { 585dc186ad7SThomas Gleixner if (onstack) 586dc186ad7SThomas Gleixner debug_object_init_on_stack(work, &work_debug_descr); 587dc186ad7SThomas Gleixner else 588dc186ad7SThomas Gleixner debug_object_init(work, &work_debug_descr); 589dc186ad7SThomas Gleixner } 590dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(__init_work); 591dc186ad7SThomas Gleixner 592dc186ad7SThomas Gleixner void destroy_work_on_stack(struct work_struct *work) 593dc186ad7SThomas Gleixner { 594dc186ad7SThomas Gleixner debug_object_free(work, &work_debug_descr); 595dc186ad7SThomas Gleixner } 596dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_work_on_stack); 597dc186ad7SThomas Gleixner 598ea2e64f2SThomas Gleixner void destroy_delayed_work_on_stack(struct delayed_work *work) 599ea2e64f2SThomas Gleixner { 600ea2e64f2SThomas Gleixner destroy_timer_on_stack(&work->timer); 601ea2e64f2SThomas Gleixner debug_object_free(&work->work, &work_debug_descr); 602ea2e64f2SThomas Gleixner } 603ea2e64f2SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack); 604ea2e64f2SThomas Gleixner 605dc186ad7SThomas Gleixner #else 606dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) { } 607dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) { } 608dc186ad7SThomas Gleixner #endif 609dc186ad7SThomas Gleixner 6104e8b22bdSLi Bin /** 61167dc8325SCai Huoqing * worker_pool_assign_id - allocate ID and assign it to @pool 6124e8b22bdSLi Bin * @pool: the pool pointer of interest 6134e8b22bdSLi Bin * 6144e8b22bdSLi Bin * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned 6154e8b22bdSLi 
Bin * successfully, -errno on failure. 6164e8b22bdSLi Bin */ 6179daf9e67STejun Heo static int worker_pool_assign_id(struct worker_pool *pool) 6189daf9e67STejun Heo { 6199daf9e67STejun Heo int ret; 6209daf9e67STejun Heo 62168e13a67SLai Jiangshan lockdep_assert_held(&wq_pool_mutex); 6225bcab335STejun Heo 6234e8b22bdSLi Bin ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, 6244e8b22bdSLi Bin GFP_KERNEL); 625229641a6STejun Heo if (ret >= 0) { 626e68035fbSTejun Heo pool->id = ret; 627229641a6STejun Heo return 0; 628229641a6STejun Heo } 6299daf9e67STejun Heo return ret; 6309daf9e67STejun Heo } 6319daf9e67STejun Heo 632bd31fb92STejun Heo static struct pool_workqueue __rcu ** 633bd31fb92STejun Heo unbound_pwq_slot(struct workqueue_struct *wq, int cpu) 634bd31fb92STejun Heo { 635bd31fb92STejun Heo if (cpu >= 0) 636bd31fb92STejun Heo return per_cpu_ptr(wq->cpu_pwq, cpu); 637bd31fb92STejun Heo else 638bd31fb92STejun Heo return &wq->dfl_pwq; 639bd31fb92STejun Heo } 640bd31fb92STejun Heo 641bd31fb92STejun Heo /* @cpu < 0 for dfl_pwq */ 642bd31fb92STejun Heo static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu) 643bd31fb92STejun Heo { 644bd31fb92STejun Heo return rcu_dereference_check(*unbound_pwq_slot(wq, cpu), 645bd31fb92STejun Heo lockdep_is_held(&wq_pool_mutex) || 646bd31fb92STejun Heo lockdep_is_held(&wq->mutex)); 647bd31fb92STejun Heo } 648bd31fb92STejun Heo 64973f53c4aSTejun Heo static unsigned int work_color_to_flags(int color) 65073f53c4aSTejun Heo { 65173f53c4aSTejun Heo return color << WORK_STRUCT_COLOR_SHIFT; 65273f53c4aSTejun Heo } 65373f53c4aSTejun Heo 654c4560c2cSLai Jiangshan static int get_work_color(unsigned long work_data) 65573f53c4aSTejun Heo { 656c4560c2cSLai Jiangshan return (work_data >> WORK_STRUCT_COLOR_SHIFT) & 65773f53c4aSTejun Heo ((1 << WORK_STRUCT_COLOR_BITS) - 1); 65873f53c4aSTejun Heo } 65973f53c4aSTejun Heo 66073f53c4aSTejun Heo static int work_next_color(int color) 66173f53c4aSTejun Heo { 66273f53c4aSTejun Heo return (color + 1) % WORK_NR_COLORS; 663a848e3b6SOleg Nesterov } 664a848e3b6SOleg Nesterov 6654594bf15SDavid Howells /* 666112202d9STejun Heo * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data 667112202d9STejun Heo * contain the pointer to the queued pwq. Once execution starts, the flag 6687c3eed5cSTejun Heo * is cleared and the high bits contain OFFQ flags and pool ID. 6697a22ad75STejun Heo * 670112202d9STejun Heo * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling() 671112202d9STejun Heo * and clear_work_data() can be used to set the pwq, pool or clear 672bbb68dfaSTejun Heo * work->data. These functions should only be called while the work is 673bbb68dfaSTejun Heo * owned - ie. while the PENDING bit is set. 6747a22ad75STejun Heo * 675112202d9STejun Heo * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq 6767c3eed5cSTejun Heo * corresponding to a work. Pool is available once the work has been 677112202d9STejun Heo * queued anywhere after initialization until it is sync canceled. pwq is 6787c3eed5cSTejun Heo * available only while the work item is queued. 679bbb68dfaSTejun Heo * 680bbb68dfaSTejun Heo * %WORK_OFFQ_CANCELING is used to mark a work item which is being 681bbb68dfaSTejun Heo * canceled. While being canceled, a work item may have its PENDING set 682bbb68dfaSTejun Heo * but stay off timer and worklist for arbitrarily long and nobody should 683bbb68dfaSTejun Heo * try to steal the PENDING bit. 
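 *
 * A rough lifecycle of work->data as implemented by the helpers below
 * (informal summary, not additional rules):
 *
 *	queueing:	set_work_pwq()				pwq ptr | PWQ | PENDING
 *	exec starts:	set_work_pool_and_clear_pending()	pool ID in high bits
 *	canceling:	mark_work_canceling()			pool ID | CANCELING
 *	sync cancel:	clear_work_data()			WORK_STRUCT_NO_POOL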
6844594bf15SDavid Howells */ 6857a22ad75STejun Heo static inline void set_work_data(struct work_struct *work, unsigned long data, 6867a22ad75STejun Heo unsigned long flags) 6877a22ad75STejun Heo { 6886183c009STejun Heo WARN_ON_ONCE(!work_pending(work)); 6897a22ad75STejun Heo atomic_long_set(&work->data, data | flags | work_static(work)); 6907a22ad75STejun Heo } 6917a22ad75STejun Heo 692112202d9STejun Heo static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, 6934690c4abSTejun Heo unsigned long extra_flags) 694365970a1SDavid Howells { 695112202d9STejun Heo set_work_data(work, (unsigned long)pwq, 696112202d9STejun Heo WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags); 697365970a1SDavid Howells } 698365970a1SDavid Howells 6994468a00fSLai Jiangshan static void set_work_pool_and_keep_pending(struct work_struct *work, 7004468a00fSLai Jiangshan int pool_id) 7014468a00fSLai Jiangshan { 7024468a00fSLai Jiangshan set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 7034468a00fSLai Jiangshan WORK_STRUCT_PENDING); 7044468a00fSLai Jiangshan } 7054468a00fSLai Jiangshan 7067c3eed5cSTejun Heo static void set_work_pool_and_clear_pending(struct work_struct *work, 7077c3eed5cSTejun Heo int pool_id) 7084d707b9fSOleg Nesterov { 70923657bb1STejun Heo /* 71023657bb1STejun Heo * The following wmb is paired with the implied mb in 71123657bb1STejun Heo * test_and_set_bit(PENDING) and ensures all updates to @work made 71223657bb1STejun Heo * here are visible to and precede any updates by the next PENDING 71323657bb1STejun Heo * owner. 71423657bb1STejun Heo */ 71523657bb1STejun Heo smp_wmb(); 7167c3eed5cSTejun Heo set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 717346c09f8SRoman Pen /* 718346c09f8SRoman Pen * The following mb guarantees that previous clear of a PENDING bit 719346c09f8SRoman Pen * will not be reordered with any speculative LOADS or STORES from 720346c09f8SRoman Pen * work->current_func, which is executed afterwards. This possible 7218bdc6201SLiu Song * reordering can lead to a missed execution on attempt to queue 722346c09f8SRoman Pen * the same @work. E.g. consider this case: 723346c09f8SRoman Pen * 724346c09f8SRoman Pen * CPU#0 CPU#1 725346c09f8SRoman Pen * ---------------------------- -------------------------------- 726346c09f8SRoman Pen * 727346c09f8SRoman Pen * 1 STORE event_indicated 728346c09f8SRoman Pen * 2 queue_work_on() { 729346c09f8SRoman Pen * 3 test_and_set_bit(PENDING) 730346c09f8SRoman Pen * 4 } set_..._and_clear_pending() { 731346c09f8SRoman Pen * 5 set_work_data() # clear bit 732346c09f8SRoman Pen * 6 smp_mb() 733346c09f8SRoman Pen * 7 work->current_func() { 734346c09f8SRoman Pen * 8 LOAD event_indicated 735346c09f8SRoman Pen * } 736346c09f8SRoman Pen * 737346c09f8SRoman Pen * Without an explicit full barrier speculative LOAD on line 8 can 738346c09f8SRoman Pen * be executed before CPU#0 does STORE on line 1. If that happens, 739346c09f8SRoman Pen * CPU#0 observes the PENDING bit is still set and new execution of 740346c09f8SRoman Pen * a @work is not queued in a hope, that CPU#1 will eventually 741346c09f8SRoman Pen * finish the queued @work. Meanwhile CPU#1 does not see 742346c09f8SRoman Pen * event_indicated is set, because speculative LOAD was executed 743346c09f8SRoman Pen * before actual STORE. 
744346c09f8SRoman Pen */ 745346c09f8SRoman Pen smp_mb(); 7464d707b9fSOleg Nesterov } 7474d707b9fSOleg Nesterov 7487a22ad75STejun Heo static void clear_work_data(struct work_struct *work) 749365970a1SDavid Howells { 7507c3eed5cSTejun Heo smp_wmb(); /* see set_work_pool_and_clear_pending() */ 7517c3eed5cSTejun Heo set_work_data(work, WORK_STRUCT_NO_POOL, 0); 7527a22ad75STejun Heo } 7537a22ad75STejun Heo 754afa4bb77SLinus Torvalds static inline struct pool_workqueue *work_struct_pwq(unsigned long data) 755afa4bb77SLinus Torvalds { 756afa4bb77SLinus Torvalds return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK); 757afa4bb77SLinus Torvalds } 758afa4bb77SLinus Torvalds 759112202d9STejun Heo static struct pool_workqueue *get_work_pwq(struct work_struct *work) 7607a22ad75STejun Heo { 761e120153dSTejun Heo unsigned long data = atomic_long_read(&work->data); 7627a22ad75STejun Heo 763112202d9STejun Heo if (data & WORK_STRUCT_PWQ) 764afa4bb77SLinus Torvalds return work_struct_pwq(data); 765e120153dSTejun Heo else 766e120153dSTejun Heo return NULL; 7677a22ad75STejun Heo } 7687a22ad75STejun Heo 7697c3eed5cSTejun Heo /** 7707c3eed5cSTejun Heo * get_work_pool - return the worker_pool a given work was associated with 7717c3eed5cSTejun Heo * @work: the work item of interest 7727c3eed5cSTejun Heo * 77368e13a67SLai Jiangshan * Pools are created and destroyed under wq_pool_mutex, and allows read 77424acfb71SThomas Gleixner * access under RCU read lock. As such, this function should be 77524acfb71SThomas Gleixner * called under wq_pool_mutex or inside of a rcu_read_lock() region. 776fa1b54e6STejun Heo * 777fa1b54e6STejun Heo * All fields of the returned pool are accessible as long as the above 778fa1b54e6STejun Heo * mentioned locking is in effect. If the returned pool needs to be used 779fa1b54e6STejun Heo * beyond the critical section, the caller is responsible for ensuring the 780fa1b54e6STejun Heo * returned pool is and stays online. 781d185af30SYacine Belkadi * 782d185af30SYacine Belkadi * Return: The worker_pool @work was last associated with. %NULL if none. 7837c3eed5cSTejun Heo */ 7847c3eed5cSTejun Heo static struct worker_pool *get_work_pool(struct work_struct *work) 7857a22ad75STejun Heo { 786e120153dSTejun Heo unsigned long data = atomic_long_read(&work->data); 7877c3eed5cSTejun Heo int pool_id; 7887a22ad75STejun Heo 78968e13a67SLai Jiangshan assert_rcu_or_pool_mutex(); 790fa1b54e6STejun Heo 791112202d9STejun Heo if (data & WORK_STRUCT_PWQ) 792afa4bb77SLinus Torvalds return work_struct_pwq(data)->pool; 7937a22ad75STejun Heo 7947c3eed5cSTejun Heo pool_id = data >> WORK_OFFQ_POOL_SHIFT; 7957c3eed5cSTejun Heo if (pool_id == WORK_OFFQ_POOL_NONE) 7967a22ad75STejun Heo return NULL; 7977a22ad75STejun Heo 798fa1b54e6STejun Heo return idr_find(&worker_pool_idr, pool_id); 7997c3eed5cSTejun Heo } 8007c3eed5cSTejun Heo 8017c3eed5cSTejun Heo /** 8027c3eed5cSTejun Heo * get_work_pool_id - return the worker pool ID a given work is associated with 8037c3eed5cSTejun Heo * @work: the work item of interest 8047c3eed5cSTejun Heo * 805d185af30SYacine Belkadi * Return: The worker_pool ID @work was last associated with. 8067c3eed5cSTejun Heo * %WORK_OFFQ_POOL_NONE if none. 
8077c3eed5cSTejun Heo */ 8087c3eed5cSTejun Heo static int get_work_pool_id(struct work_struct *work) 8097c3eed5cSTejun Heo { 81054d5b7d0SLai Jiangshan unsigned long data = atomic_long_read(&work->data); 8117c3eed5cSTejun Heo 812112202d9STejun Heo if (data & WORK_STRUCT_PWQ) 813afa4bb77SLinus Torvalds return work_struct_pwq(data)->pool->id; 81454d5b7d0SLai Jiangshan 81554d5b7d0SLai Jiangshan return data >> WORK_OFFQ_POOL_SHIFT; 8167c3eed5cSTejun Heo } 8177c3eed5cSTejun Heo 818bbb68dfaSTejun Heo static void mark_work_canceling(struct work_struct *work) 819bbb68dfaSTejun Heo { 8207c3eed5cSTejun Heo unsigned long pool_id = get_work_pool_id(work); 821bbb68dfaSTejun Heo 8227c3eed5cSTejun Heo pool_id <<= WORK_OFFQ_POOL_SHIFT; 8237c3eed5cSTejun Heo set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); 824bbb68dfaSTejun Heo } 825bbb68dfaSTejun Heo 826bbb68dfaSTejun Heo static bool work_is_canceling(struct work_struct *work) 827bbb68dfaSTejun Heo { 828bbb68dfaSTejun Heo unsigned long data = atomic_long_read(&work->data); 829bbb68dfaSTejun Heo 830112202d9STejun Heo return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 831bbb68dfaSTejun Heo } 832bbb68dfaSTejun Heo 833e22bee78STejun Heo /* 8343270476aSTejun Heo * Policy functions. These define the policies on how the global worker 8353270476aSTejun Heo * pools are managed. Unless noted otherwise, these functions assume that 836d565ed63STejun Heo * they're being called with pool->lock held. 837e22bee78STejun Heo */ 838e22bee78STejun Heo 839e22bee78STejun Heo /* 840e22bee78STejun Heo * Need to wake up a worker? Called from anything but currently 841e22bee78STejun Heo * running workers. 842974271c4STejun Heo * 843974271c4STejun Heo * Note that, because unbound workers never contribute to nr_running, this 844706026c2STejun Heo * function will always return %true for unbound pools as long as the 845974271c4STejun Heo * worklist isn't empty. 846e22bee78STejun Heo */ 84763d95a91STejun Heo static bool need_more_worker(struct worker_pool *pool) 848e22bee78STejun Heo { 8490219a352STejun Heo return !list_empty(&pool->worklist) && !pool->nr_running; 850e22bee78STejun Heo } 851e22bee78STejun Heo 852e22bee78STejun Heo /* Can I start working? Called from busy but !running workers. */ 85363d95a91STejun Heo static bool may_start_working(struct worker_pool *pool) 854e22bee78STejun Heo { 85563d95a91STejun Heo return pool->nr_idle; 856e22bee78STejun Heo } 857e22bee78STejun Heo 858e22bee78STejun Heo /* Do I need to keep working? Called from currently running workers. */ 85963d95a91STejun Heo static bool keep_working(struct worker_pool *pool) 860e22bee78STejun Heo { 861bc35f7efSLai Jiangshan return !list_empty(&pool->worklist) && (pool->nr_running <= 1); 862e22bee78STejun Heo } 863e22bee78STejun Heo 864e22bee78STejun Heo /* Do we need a new worker? Called from manager. */ 86563d95a91STejun Heo static bool need_to_create_worker(struct worker_pool *pool) 866e22bee78STejun Heo { 86763d95a91STejun Heo return need_more_worker(pool) && !may_start_working(pool); 868e22bee78STejun Heo } 869e22bee78STejun Heo 870e22bee78STejun Heo /* Do we have too many workers and should some go away? 
*/ 87163d95a91STejun Heo static bool too_many_workers(struct worker_pool *pool) 872e22bee78STejun Heo { 873692b4825STejun Heo bool managing = pool->flags & POOL_MANAGER_ACTIVE; 87463d95a91STejun Heo int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 87563d95a91STejun Heo int nr_busy = pool->nr_workers - nr_idle; 876e22bee78STejun Heo 877e22bee78STejun Heo return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 878e22bee78STejun Heo } 879e22bee78STejun Heo 8804690c4abSTejun Heo /** 881e22bee78STejun Heo * worker_set_flags - set worker flags and adjust nr_running accordingly 882cb444766STejun Heo * @worker: self 883d302f017STejun Heo * @flags: flags to set 884d302f017STejun Heo * 885228f1d00SLai Jiangshan * Set @flags in @worker->flags and adjust nr_running accordingly. 886d302f017STejun Heo */ 887228f1d00SLai Jiangshan static inline void worker_set_flags(struct worker *worker, unsigned int flags) 888d302f017STejun Heo { 889bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 890e22bee78STejun Heo 891bc8b50c2STejun Heo lockdep_assert_held(&pool->lock); 892cb444766STejun Heo 893228f1d00SLai Jiangshan /* If transitioning into NOT_RUNNING, adjust nr_running. */ 894e22bee78STejun Heo if ((flags & WORKER_NOT_RUNNING) && 895e22bee78STejun Heo !(worker->flags & WORKER_NOT_RUNNING)) { 896bc35f7efSLai Jiangshan pool->nr_running--; 897e22bee78STejun Heo } 898e22bee78STejun Heo 899d302f017STejun Heo worker->flags |= flags; 900d302f017STejun Heo } 901d302f017STejun Heo 902d302f017STejun Heo /** 903e22bee78STejun Heo * worker_clr_flags - clear worker flags and adjust nr_running accordingly 904cb444766STejun Heo * @worker: self 905d302f017STejun Heo * @flags: flags to clear 906d302f017STejun Heo * 907e22bee78STejun Heo * Clear @flags in @worker->flags and adjust nr_running accordingly. 908d302f017STejun Heo */ 909d302f017STejun Heo static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 910d302f017STejun Heo { 91163d95a91STejun Heo struct worker_pool *pool = worker->pool; 912e22bee78STejun Heo unsigned int oflags = worker->flags; 913e22bee78STejun Heo 914bc8b50c2STejun Heo lockdep_assert_held(&pool->lock); 915cb444766STejun Heo 916d302f017STejun Heo worker->flags &= ~flags; 917e22bee78STejun Heo 91842c025f3STejun Heo /* 91942c025f3STejun Heo * If transitioning out of NOT_RUNNING, increment nr_running. Note 92042c025f3STejun Heo * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 92142c025f3STejun Heo * of multiple flags, not a single flag. 92242c025f3STejun Heo */ 923e22bee78STejun Heo if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 924e22bee78STejun Heo if (!(worker->flags & WORKER_NOT_RUNNING)) 925bc35f7efSLai Jiangshan pool->nr_running++; 926d302f017STejun Heo } 927d302f017STejun Heo 928797e8345STejun Heo /* Return the first idle worker. Called with pool->lock held. */ 929797e8345STejun Heo static struct worker *first_idle_worker(struct worker_pool *pool) 930797e8345STejun Heo { 931797e8345STejun Heo if (unlikely(list_empty(&pool->idle_list))) 932797e8345STejun Heo return NULL; 933797e8345STejun Heo 934797e8345STejun Heo return list_first_entry(&pool->idle_list, struct worker, entry); 935797e8345STejun Heo } 936797e8345STejun Heo 937797e8345STejun Heo /** 938797e8345STejun Heo * worker_enter_idle - enter idle state 939797e8345STejun Heo * @worker: worker which is entering idle state 940797e8345STejun Heo * 941797e8345STejun Heo * @worker is entering idle state. 
Update stats and idle timer if 942797e8345STejun Heo * necessary. 943797e8345STejun Heo * 944797e8345STejun Heo * LOCKING: 945797e8345STejun Heo * raw_spin_lock_irq(pool->lock). 946797e8345STejun Heo */ 947797e8345STejun Heo static void worker_enter_idle(struct worker *worker) 948797e8345STejun Heo { 949797e8345STejun Heo struct worker_pool *pool = worker->pool; 950797e8345STejun Heo 951797e8345STejun Heo if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 952797e8345STejun Heo WARN_ON_ONCE(!list_empty(&worker->entry) && 953797e8345STejun Heo (worker->hentry.next || worker->hentry.pprev))) 954797e8345STejun Heo return; 955797e8345STejun Heo 956797e8345STejun Heo /* can't use worker_set_flags(), also called from create_worker() */ 957797e8345STejun Heo worker->flags |= WORKER_IDLE; 958797e8345STejun Heo pool->nr_idle++; 959797e8345STejun Heo worker->last_active = jiffies; 960797e8345STejun Heo 961797e8345STejun Heo /* idle_list is LIFO */ 962797e8345STejun Heo list_add(&worker->entry, &pool->idle_list); 963797e8345STejun Heo 964797e8345STejun Heo if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 965797e8345STejun Heo mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 966797e8345STejun Heo 967797e8345STejun Heo /* Sanity check nr_running. */ 968797e8345STejun Heo WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running); 969797e8345STejun Heo } 970797e8345STejun Heo 971797e8345STejun Heo /** 972797e8345STejun Heo * worker_leave_idle - leave idle state 973797e8345STejun Heo * @worker: worker which is leaving idle state 974797e8345STejun Heo * 975797e8345STejun Heo * @worker is leaving idle state. Update stats. 976797e8345STejun Heo * 977797e8345STejun Heo * LOCKING: 978797e8345STejun Heo * raw_spin_lock_irq(pool->lock). 979797e8345STejun Heo */ 980797e8345STejun Heo static void worker_leave_idle(struct worker *worker) 981797e8345STejun Heo { 982797e8345STejun Heo struct worker_pool *pool = worker->pool; 983797e8345STejun Heo 984797e8345STejun Heo if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 985797e8345STejun Heo return; 986797e8345STejun Heo worker_clr_flags(worker, WORKER_IDLE); 987797e8345STejun Heo pool->nr_idle--; 988797e8345STejun Heo list_del_init(&worker->entry); 989797e8345STejun Heo } 990797e8345STejun Heo 991797e8345STejun Heo /** 992797e8345STejun Heo * find_worker_executing_work - find worker which is executing a work 993797e8345STejun Heo * @pool: pool of interest 994797e8345STejun Heo * @work: work to find worker for 995797e8345STejun Heo * 996797e8345STejun Heo * Find a worker which is executing @work on @pool by searching 997797e8345STejun Heo * @pool->busy_hash which is keyed by the address of @work. For a worker 998797e8345STejun Heo * to match, its current execution should match the address of @work and 999797e8345STejun Heo * its work function. This is to avoid unwanted dependency between 1000797e8345STejun Heo * unrelated work executions through a work item being recycled while still 1001797e8345STejun Heo * being executed. 1002797e8345STejun Heo * 1003797e8345STejun Heo * This is a bit tricky. A work item may be freed once its execution 1004797e8345STejun Heo * starts and nothing prevents the freed area from being recycled for 1005797e8345STejun Heo * another work item. 
If the same work item address ends up being reused 1006797e8345STejun Heo * before the original execution finishes, workqueue will identify the 1007797e8345STejun Heo * recycled work item as currently executing and make it wait until the 1008797e8345STejun Heo * current execution finishes, introducing an unwanted dependency. 1009797e8345STejun Heo * 1010797e8345STejun Heo * This function checks the work item address and work function to avoid 1011797e8345STejun Heo * false positives. Note that this isn't complete as one may construct a 1012797e8345STejun Heo * work function which can introduce dependency onto itself through a 1013797e8345STejun Heo * recycled work item. Well, if somebody wants to shoot oneself in the 1014797e8345STejun Heo * foot that badly, there's only so much we can do, and if such deadlock 1015797e8345STejun Heo * actually occurs, it should be easy to locate the culprit work function. 1016797e8345STejun Heo * 1017797e8345STejun Heo * CONTEXT: 1018797e8345STejun Heo * raw_spin_lock_irq(pool->lock). 1019797e8345STejun Heo * 1020797e8345STejun Heo * Return: 1021797e8345STejun Heo * Pointer to worker which is executing @work if found, %NULL 1022797e8345STejun Heo * otherwise. 1023797e8345STejun Heo */ 1024797e8345STejun Heo static struct worker *find_worker_executing_work(struct worker_pool *pool, 1025797e8345STejun Heo struct work_struct *work) 1026797e8345STejun Heo { 1027797e8345STejun Heo struct worker *worker; 1028797e8345STejun Heo 1029797e8345STejun Heo hash_for_each_possible(pool->busy_hash, worker, hentry, 1030797e8345STejun Heo (unsigned long)work) 1031797e8345STejun Heo if (worker->current_work == work && 1032797e8345STejun Heo worker->current_func == work->func) 1033797e8345STejun Heo return worker; 1034797e8345STejun Heo 1035797e8345STejun Heo return NULL; 1036797e8345STejun Heo } 1037797e8345STejun Heo 1038797e8345STejun Heo /** 1039797e8345STejun Heo * move_linked_works - move linked works to a list 1040797e8345STejun Heo * @work: start of series of works to be scheduled 1041797e8345STejun Heo * @head: target list to append @work to 1042797e8345STejun Heo * @nextp: out parameter for nested worklist walking 1043797e8345STejun Heo * 1044873eaca6STejun Heo * Schedule linked works starting from @work to @head. Work series to be 1045873eaca6STejun Heo * scheduled starts at @work and includes any consecutive work with 1046873eaca6STejun Heo * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on 1047873eaca6STejun Heo * @nextp. 1048797e8345STejun Heo * 1049797e8345STejun Heo * CONTEXT: 1050797e8345STejun Heo * raw_spin_lock_irq(pool->lock). 1051797e8345STejun Heo */ 1052797e8345STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head, 1053797e8345STejun Heo struct work_struct **nextp) 1054797e8345STejun Heo { 1055797e8345STejun Heo struct work_struct *n; 1056797e8345STejun Heo 1057797e8345STejun Heo /* 1058797e8345STejun Heo * Linked worklist will always end before the end of the list, 1059797e8345STejun Heo * use NULL for list head. 
1060797e8345STejun Heo */ 1061797e8345STejun Heo list_for_each_entry_safe_from(work, n, NULL, entry) { 1062797e8345STejun Heo list_move_tail(&work->entry, head); 1063797e8345STejun Heo if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 1064797e8345STejun Heo break; 1065797e8345STejun Heo } 1066797e8345STejun Heo 1067797e8345STejun Heo /* 1068797e8345STejun Heo * If we're already inside safe list traversal and have moved 1069797e8345STejun Heo * multiple works to the scheduled queue, the next position 1070797e8345STejun Heo * needs to be updated. 1071797e8345STejun Heo */ 1072797e8345STejun Heo if (nextp) 1073797e8345STejun Heo *nextp = n; 1074797e8345STejun Heo } 1075797e8345STejun Heo 1076797e8345STejun Heo /** 1077873eaca6STejun Heo * assign_work - assign a work item and its linked work items to a worker 1078873eaca6STejun Heo * @work: work to assign 1079873eaca6STejun Heo * @worker: worker to assign to 1080873eaca6STejun Heo * @nextp: out parameter for nested worklist walking 1081873eaca6STejun Heo * 1082873eaca6STejun Heo * Assign @work and its linked work items to @worker. If @work is already being 1083873eaca6STejun Heo * executed by another worker in the same pool, it'll be punted there. 1084873eaca6STejun Heo * 1085873eaca6STejun Heo * If @nextp is not NULL, it's updated to point to the next work of the last 1086873eaca6STejun Heo * scheduled work. This allows assign_work() to be nested inside 1087873eaca6STejun Heo * list_for_each_entry_safe(). 1088873eaca6STejun Heo * 1089873eaca6STejun Heo * Returns %true if @work was successfully assigned to @worker. %false if @work 1090873eaca6STejun Heo * was punted to another worker already executing it. 1091873eaca6STejun Heo */ 1092873eaca6STejun Heo static bool assign_work(struct work_struct *work, struct worker *worker, 1093873eaca6STejun Heo struct work_struct **nextp) 1094873eaca6STejun Heo { 1095873eaca6STejun Heo struct worker_pool *pool = worker->pool; 1096873eaca6STejun Heo struct worker *collision; 1097873eaca6STejun Heo 1098873eaca6STejun Heo lockdep_assert_held(&pool->lock); 1099873eaca6STejun Heo 1100873eaca6STejun Heo /* 1101873eaca6STejun Heo * A single work shouldn't be executed concurrently by multiple workers. 1102873eaca6STejun Heo * __queue_work() ensures that @work doesn't jump to a different pool 1103873eaca6STejun Heo * while still running in the previous pool. Here, we should ensure that 1104873eaca6STejun Heo * @work is not executed concurrently by multiple workers from the same 1105873eaca6STejun Heo * pool. Check whether anyone is already processing the work. If so, 1106873eaca6STejun Heo * defer the work to the currently executing one. 1107873eaca6STejun Heo */ 1108873eaca6STejun Heo collision = find_worker_executing_work(pool, work); 1109873eaca6STejun Heo if (unlikely(collision)) { 1110873eaca6STejun Heo move_linked_works(work, &collision->scheduled, nextp); 1111873eaca6STejun Heo return false; 1112873eaca6STejun Heo } 1113873eaca6STejun Heo 1114873eaca6STejun Heo move_linked_works(work, &worker->scheduled, nextp); 1115873eaca6STejun Heo return true; 1116873eaca6STejun Heo } 1117873eaca6STejun Heo 1118873eaca6STejun Heo /** 11190219a352STejun Heo * kick_pool - wake up an idle worker if necessary 11200219a352STejun Heo * @pool: pool to kick 1121797e8345STejun Heo * 11220219a352STejun Heo * @pool may have pending work items. Wake up worker if necessary. Returns 11230219a352STejun Heo * whether a worker was woken up. 
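 *
 * Callers hold pool->lock and typically use the return value only for
 * stats accounting; e.g. wq_worker_sleeping() below does, in effect:
 *
 *	if (kick_pool(pool))
 *		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;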
1124797e8345STejun Heo */ 11250219a352STejun Heo static bool kick_pool(struct worker_pool *pool) 1126797e8345STejun Heo { 1127797e8345STejun Heo struct worker *worker = first_idle_worker(pool); 11288639ecebSTejun Heo struct task_struct *p; 1129797e8345STejun Heo 11300219a352STejun Heo lockdep_assert_held(&pool->lock); 11310219a352STejun Heo 11320219a352STejun Heo if (!need_more_worker(pool) || !worker) 11330219a352STejun Heo return false; 11340219a352STejun Heo 11358639ecebSTejun Heo p = worker->task; 11368639ecebSTejun Heo 11378639ecebSTejun Heo #ifdef CONFIG_SMP 11388639ecebSTejun Heo /* 11398639ecebSTejun Heo * Idle @worker is about to execute @work and waking up provides an 11408639ecebSTejun Heo * opportunity to migrate @worker at a lower cost by setting the task's 11418639ecebSTejun Heo * wake_cpu field. Let's see if we want to move @worker to improve 11428639ecebSTejun Heo * execution locality. 11438639ecebSTejun Heo * 11448639ecebSTejun Heo * We're waking the worker that most recently went idle and there's some 11458639ecebSTejun Heo * chance that @worker is marked idle but hasn't gone off CPU yet. If 11468639ecebSTejun Heo * so, setting the wake_cpu won't do anything. As this is a best-effort 11478639ecebSTejun Heo * optimization and the race window is narrow, let's leave as-is for 11488639ecebSTejun Heo * now. If this becomes pronounced, we can skip over workers which are 11498639ecebSTejun Heo * still on cpu when picking an idle worker. 11508639ecebSTejun Heo * 11518639ecebSTejun Heo * If @pool has non-strict affinity, @worker might have ended up outside 11528639ecebSTejun Heo * its affinity scope. Repatriate. 11538639ecebSTejun Heo */ 11548639ecebSTejun Heo if (!pool->attrs->affn_strict && 11558639ecebSTejun Heo !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { 11568639ecebSTejun Heo struct work_struct *work = list_first_entry(&pool->worklist, 11578639ecebSTejun Heo struct work_struct, entry); 11588639ecebSTejun Heo p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask); 11598639ecebSTejun Heo get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; 11608639ecebSTejun Heo } 11618639ecebSTejun Heo #endif 11628639ecebSTejun Heo wake_up_process(p); 11630219a352STejun Heo return true; 1164797e8345STejun Heo } 1165797e8345STejun Heo 116663638450STejun Heo #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT 116763638450STejun Heo 116863638450STejun Heo /* 116963638450STejun Heo * Concurrency-managed per-cpu work items that hog CPU for longer than 117063638450STejun Heo * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism, 117163638450STejun Heo * which prevents them from stalling other concurrency-managed work items. If a 117263638450STejun Heo * work function keeps triggering this mechanism, it's likely that the work item 117363638450STejun Heo * should be using an unbound workqueue instead. 117463638450STejun Heo * 117563638450STejun Heo * wq_cpu_intensive_report() tracks work functions which trigger such conditions 117663638450STejun Heo * and reports them so that they can be examined and converted to use unbound 117763638450STejun Heo * workqueues as appropriate. To avoid flooding the console, each violating work 117863638450STejun Heo * function is tracked and reported with exponential backoff.
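 *
 * With the power-of-2 backoff below, a given work function is reported
 * when its violation count reaches 4, 8, 16, 32, ... so a chronic
 * offender produces only O(log n) log lines for n violations.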
117963638450STejun Heo */ 118063638450STejun Heo #define WCI_MAX_ENTS 128 118163638450STejun Heo 118263638450STejun Heo struct wci_ent { 118363638450STejun Heo work_func_t func; 118463638450STejun Heo atomic64_t cnt; 118563638450STejun Heo struct hlist_node hash_node; 118663638450STejun Heo }; 118763638450STejun Heo 118863638450STejun Heo static struct wci_ent wci_ents[WCI_MAX_ENTS]; 118963638450STejun Heo static int wci_nr_ents; 119063638450STejun Heo static DEFINE_RAW_SPINLOCK(wci_lock); 119163638450STejun Heo static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS)); 119263638450STejun Heo 119363638450STejun Heo static struct wci_ent *wci_find_ent(work_func_t func) 119463638450STejun Heo { 119563638450STejun Heo struct wci_ent *ent; 119663638450STejun Heo 119763638450STejun Heo hash_for_each_possible_rcu(wci_hash, ent, hash_node, 119863638450STejun Heo (unsigned long)func) { 119963638450STejun Heo if (ent->func == func) 120063638450STejun Heo return ent; 120163638450STejun Heo } 120263638450STejun Heo return NULL; 120363638450STejun Heo } 120463638450STejun Heo 120563638450STejun Heo static void wq_cpu_intensive_report(work_func_t func) 120663638450STejun Heo { 120763638450STejun Heo struct wci_ent *ent; 120863638450STejun Heo 120963638450STejun Heo restart: 121063638450STejun Heo ent = wci_find_ent(func); 121163638450STejun Heo if (ent) { 121263638450STejun Heo u64 cnt; 121363638450STejun Heo 121463638450STejun Heo /* 121563638450STejun Heo * Start reporting from the fourth time and back off 121663638450STejun Heo * exponentially. 121763638450STejun Heo */ 121863638450STejun Heo cnt = atomic64_inc_return_relaxed(&ent->cnt); 121963638450STejun Heo if (cnt >= 4 && is_power_of_2(cnt)) 122063638450STejun Heo printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n", 122163638450STejun Heo ent->func, wq_cpu_intensive_thresh_us, 122263638450STejun Heo atomic64_read(&ent->cnt)); 122363638450STejun Heo return; 122463638450STejun Heo } 122563638450STejun Heo 122663638450STejun Heo /* 122763638450STejun Heo * @func is a new violation. Allocate a new entry for it. If wci_ents[] 122863638450STejun Heo * is exhausted, something went really wrong and we probably made enough 122963638450STejun Heo * noise already.
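 *
 * (What follows is the classic check / lock / re-check pattern: the
 * first, lockless capacity test is only an optimization. Both it and
 * wci_find_ent() are repeated under wci_lock in case another CPU added
 * @func or consumed the last slot in the meantime.)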
123063638450STejun Heo */ 123163638450STejun Heo if (wci_nr_ents >= WCI_MAX_ENTS) 123263638450STejun Heo return; 123363638450STejun Heo 123463638450STejun Heo raw_spin_lock(&wci_lock); 123563638450STejun Heo 123663638450STejun Heo if (wci_nr_ents >= WCI_MAX_ENTS) { 123763638450STejun Heo raw_spin_unlock(&wci_lock); 123863638450STejun Heo return; 123963638450STejun Heo } 124063638450STejun Heo 124163638450STejun Heo if (wci_find_ent(func)) { 124263638450STejun Heo raw_spin_unlock(&wci_lock); 124363638450STejun Heo goto restart; 124463638450STejun Heo } 124563638450STejun Heo 124663638450STejun Heo ent = &wci_ents[wci_nr_ents++]; 124763638450STejun Heo ent->func = func; 124863638450STejun Heo atomic64_set(&ent->cnt, 1); 124963638450STejun Heo hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func); 125063638450STejun Heo 125163638450STejun Heo raw_spin_unlock(&wci_lock); 125263638450STejun Heo } 125363638450STejun Heo 125463638450STejun Heo #else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 125563638450STejun Heo static void wq_cpu_intensive_report(work_func_t func) {} 125663638450STejun Heo #endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 125763638450STejun Heo 1258c54d5046STejun Heo /** 12591da177e4SLinus Torvalds * wq_worker_running - a worker is running again 12601da177e4SLinus Torvalds * @task: task waking up 12611da177e4SLinus Torvalds * 12621da177e4SLinus Torvalds * This function is called when a worker returns from schedule() 12631da177e4SLinus Torvalds */ 12641da177e4SLinus Torvalds void wq_worker_running(struct task_struct *task) 12651da177e4SLinus Torvalds { 12661da177e4SLinus Torvalds struct worker *worker = kthread_data(task); 12671da177e4SLinus Torvalds 1268c8f6219bSZqiang if (!READ_ONCE(worker->sleeping)) 12691da177e4SLinus Torvalds return; 12701da177e4SLinus Torvalds 12711da177e4SLinus Torvalds /* 12721da177e4SLinus Torvalds * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check 12731da177e4SLinus Torvalds * and the nr_running increment below, we may ruin the nr_running reset 12741da177e4SLinus Torvalds * and leave with an unexpected pool->nr_running == 1 on the newly unbound 12751da177e4SLinus Torvalds * pool. Protect against such a race. 12761da177e4SLinus Torvalds */ 12771da177e4SLinus Torvalds preempt_disable(); 12781da177e4SLinus Torvalds if (!(worker->flags & WORKER_NOT_RUNNING)) 12791da177e4SLinus Torvalds worker->pool->nr_running++; 12801da177e4SLinus Torvalds preempt_enable(); 1281616db877STejun Heo 1282616db877STejun Heo /* 1283616db877STejun Heo * CPU intensive auto-detection cares about how long a work item hogged 1284616db877STejun Heo * CPU without sleeping. Reset the starting timestamp on wakeup. 1285616db877STejun Heo */ 1286616db877STejun Heo worker->current_at = worker->task->se.sum_exec_runtime; 1287616db877STejun Heo 1288c8f6219bSZqiang WRITE_ONCE(worker->sleeping, 0); 12891da177e4SLinus Torvalds } 12901da177e4SLinus Torvalds 12911da177e4SLinus Torvalds /** 12921da177e4SLinus Torvalds * wq_worker_sleeping - a worker is going to sleep 12931da177e4SLinus Torvalds * @task: task going to sleep 12941da177e4SLinus Torvalds * 12951da177e4SLinus Torvalds * This function is called from schedule() when a busy worker is 12961da177e4SLinus Torvalds * going to sleep.
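 *
 * It decrements pool->nr_running and, via kick_pool(), may wake an idle
 * worker to take over; wq_worker_running() re-increments the count when
 * this worker resumes.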
12971da177e4SLinus Torvalds */ 12981da177e4SLinus Torvalds void wq_worker_sleeping(struct task_struct *task) 12991da177e4SLinus Torvalds { 13001da177e4SLinus Torvalds struct worker *worker = kthread_data(task); 13011da177e4SLinus Torvalds struct worker_pool *pool; 13021da177e4SLinus Torvalds 13031da177e4SLinus Torvalds /* 13041da177e4SLinus Torvalds * Rescuers, which may not have all the fields set up like normal 13051da177e4SLinus Torvalds * workers, also reach here; let's not access anything before 13061da177e4SLinus Torvalds * checking NOT_RUNNING. 13071da177e4SLinus Torvalds */ 13081da177e4SLinus Torvalds if (worker->flags & WORKER_NOT_RUNNING) 13091da177e4SLinus Torvalds return; 13101da177e4SLinus Torvalds 13111da177e4SLinus Torvalds pool = worker->pool; 13121da177e4SLinus Torvalds 13131da177e4SLinus Torvalds /* Return if preempted before wq_worker_running() was reached */ 1314c8f6219bSZqiang if (READ_ONCE(worker->sleeping)) 13151da177e4SLinus Torvalds return; 13161da177e4SLinus Torvalds 1317c8f6219bSZqiang WRITE_ONCE(worker->sleeping, 1); 13181da177e4SLinus Torvalds raw_spin_lock_irq(&pool->lock); 13191da177e4SLinus Torvalds 13201da177e4SLinus Torvalds /* 13211da177e4SLinus Torvalds * Recheck in case unbind_workers() preempted us. We don't 13221da177e4SLinus Torvalds * want to decrement nr_running after the worker is unbound 13231da177e4SLinus Torvalds * and nr_running has been reset. 13241da177e4SLinus Torvalds */ 13251da177e4SLinus Torvalds if (worker->flags & WORKER_NOT_RUNNING) { 13261da177e4SLinus Torvalds raw_spin_unlock_irq(&pool->lock); 13271da177e4SLinus Torvalds return; 13281da177e4SLinus Torvalds } 13291da177e4SLinus Torvalds 13301da177e4SLinus Torvalds pool->nr_running--; 13310219a352STejun Heo if (kick_pool(pool)) 1332725e8ec5STejun Heo worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++; 13330219a352STejun Heo 13341da177e4SLinus Torvalds raw_spin_unlock_irq(&pool->lock); 13351da177e4SLinus Torvalds } 13361da177e4SLinus Torvalds 13371da177e4SLinus Torvalds /** 1338616db877STejun Heo * wq_worker_tick - a scheduler tick occurred while a kworker is running 1339616db877STejun Heo * @task: task currently running 1340616db877STejun Heo * 1341616db877STejun Heo * Called from scheduler_tick(). We're in the IRQ context and the current 1342616db877STejun Heo * worker's fields which follow the 'K' locking rule can be accessed safely. 1343616db877STejun Heo */ 1344616db877STejun Heo void wq_worker_tick(struct task_struct *task) 1345616db877STejun Heo { 1346616db877STejun Heo struct worker *worker = kthread_data(task); 1347616db877STejun Heo struct pool_workqueue *pwq = worker->current_pwq; 1348616db877STejun Heo struct worker_pool *pool = worker->pool; 1349616db877STejun Heo 1350616db877STejun Heo if (!pwq) 1351616db877STejun Heo return; 1352616db877STejun Heo 13538a1dd1e5STejun Heo pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC; 13548a1dd1e5STejun Heo 135518c8ae81SZqiang if (!wq_cpu_intensive_thresh_us) 135618c8ae81SZqiang return; 135718c8ae81SZqiang 1358616db877STejun Heo /* 1359616db877STejun Heo * If the current worker is concurrency managed and hogged the CPU for 1360616db877STejun Heo * longer than wq_cpu_intensive_thresh_us, it's automatically marked 1361616db877STejun Heo * CPU_INTENSIVE to avoid stalling other concurrency-managed work items. 1362c8f6219bSZqiang * 1363c8f6219bSZqiang * @worker->sleeping being set means that @worker is in the process of 1364c8f6219bSZqiang * switching out voluntarily and won't be contributing to 1365c8f6219bSZqiang * @pool->nr_running until it wakes up.
As wq_worker_sleeping() also 1366c8f6219bSZqiang * decrements ->nr_running, setting CPU_INTENSIVE here can lead to 1367c8f6219bSZqiang * double decrements. The task is releasing the CPU anyway. Let's skip. 1368c8f6219bSZqiang * We probably want to make this prettier in the future. 1369616db877STejun Heo */ 1370c8f6219bSZqiang if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || 1371616db877STejun Heo worker->task->se.sum_exec_runtime - worker->current_at < 1372616db877STejun Heo wq_cpu_intensive_thresh_us * NSEC_PER_USEC) 1373616db877STejun Heo return; 1374616db877STejun Heo 1375616db877STejun Heo raw_spin_lock(&pool->lock); 1376616db877STejun Heo 1377616db877STejun Heo worker_set_flags(worker, WORKER_CPU_INTENSIVE); 137863638450STejun Heo wq_cpu_intensive_report(worker->current_func); 1379616db877STejun Heo pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; 1380616db877STejun Heo 13810219a352STejun Heo if (kick_pool(pool)) 1382616db877STejun Heo pwq->stats[PWQ_STAT_CM_WAKEUP]++; 1383616db877STejun Heo 1384616db877STejun Heo raw_spin_unlock(&pool->lock); 1385616db877STejun Heo } 1386616db877STejun Heo 1387616db877STejun Heo /** 13881da177e4SLinus Torvalds * wq_worker_last_func - retrieve worker's last work function 13891da177e4SLinus Torvalds * @task: Task to retrieve last work function of. 13901da177e4SLinus Torvalds * 13911da177e4SLinus Torvalds * Determine the last function a worker executed. This is called from 13921da177e4SLinus Torvalds * the scheduler to get a worker's last known identity. 13931da177e4SLinus Torvalds * 13941da177e4SLinus Torvalds * CONTEXT: 13959b41ea72SAndrew Morton * raw_spin_lock_irq(rq->lock) 13961da177e4SLinus Torvalds * 13971da177e4SLinus Torvalds * This function is called during schedule() when a kworker is going 1398f756d5e2SNathan Lynch * to sleep. It's used by psi to identify aggregation workers during 1399f756d5e2SNathan Lynch * dequeuing, to allow periodic aggregation to shut off when that 14001da177e4SLinus Torvalds * worker is the last task in the system or cgroup to go to sleep. 14011da177e4SLinus Torvalds * 14021da177e4SLinus Torvalds * As this function doesn't involve any workqueue-related locking, it 14031da177e4SLinus Torvalds * only returns stable values when called from inside the scheduler's 14041da177e4SLinus Torvalds * queuing and dequeuing paths, when @task, which must be a kworker, 14051da177e4SLinus Torvalds * is guaranteed to not be processing any works. 1406365970a1SDavid Howells * 1407365970a1SDavid Howells * Return: 1408365970a1SDavid Howells * The last work function %current executed as a worker, NULL if it 1409365970a1SDavid Howells * hasn't executed any work yet. 1410365970a1SDavid Howells */ 1411365970a1SDavid Howells work_func_t wq_worker_last_func(struct task_struct *task) 1412365970a1SDavid Howells { 1413365970a1SDavid Howells struct worker *worker = kthread_data(task); 1414365970a1SDavid Howells 1415365970a1SDavid Howells return worker->last_func; 1416365970a1SDavid Howells } 1417365970a1SDavid Howells 1418d302f017STejun Heo /** 14198864b4e5STejun Heo * get_pwq - get an extra reference on the specified pool_workqueue 14208864b4e5STejun Heo * @pwq: pool_workqueue to get 14218864b4e5STejun Heo * 14228864b4e5STejun Heo * Obtain an extra reference on @pwq. The caller should guarantee that 14238864b4e5STejun Heo * @pwq has a positive refcnt and should be holding the matching pool->lock.
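 *
 * Illustrative pairing (a sketch rather than a quote of any caller):
 *
 *	raw_spin_lock_irq(&pwq->pool->lock);
 *	get_pwq(pwq);
 *	...
 *	put_pwq(pwq);
 *	raw_spin_unlock_irq(&pwq->pool->lock);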
14248864b4e5STejun Heo */ 14258864b4e5STejun Heo static void get_pwq(struct pool_workqueue *pwq) 14268864b4e5STejun Heo { 14278864b4e5STejun Heo lockdep_assert_held(&pwq->pool->lock); 14288864b4e5STejun Heo WARN_ON_ONCE(pwq->refcnt <= 0); 14298864b4e5STejun Heo pwq->refcnt++; 14308864b4e5STejun Heo } 14318864b4e5STejun Heo 14328864b4e5STejun Heo /** 14338864b4e5STejun Heo * put_pwq - put a pool_workqueue reference 14348864b4e5STejun Heo * @pwq: pool_workqueue to put 14358864b4e5STejun Heo * 14368864b4e5STejun Heo * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 14378864b4e5STejun Heo * destruction. The caller should be holding the matching pool->lock. 14388864b4e5STejun Heo */ 14398864b4e5STejun Heo static void put_pwq(struct pool_workqueue *pwq) 14408864b4e5STejun Heo { 14418864b4e5STejun Heo lockdep_assert_held(&pwq->pool->lock); 14428864b4e5STejun Heo if (likely(--pwq->refcnt)) 14438864b4e5STejun Heo return; 14448864b4e5STejun Heo /* 1445967b494eSTejun Heo * @pwq can't be released under pool->lock, bounce to a dedicated 1446967b494eSTejun Heo * kthread_worker to avoid A-A deadlocks. 14478864b4e5STejun Heo */ 1448687a9aa5STejun Heo kthread_queue_work(pwq_release_worker, &pwq->release_work); 14498864b4e5STejun Heo } 14508864b4e5STejun Heo 1451dce90d47STejun Heo /** 1452dce90d47STejun Heo * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock 1453dce90d47STejun Heo * @pwq: pool_workqueue to put (can be %NULL) 1454dce90d47STejun Heo * 1455dce90d47STejun Heo * put_pwq() with locking. This function also allows %NULL @pwq. 1456dce90d47STejun Heo */ 1457dce90d47STejun Heo static void put_pwq_unlocked(struct pool_workqueue *pwq) 1458dce90d47STejun Heo { 1459dce90d47STejun Heo if (pwq) { 1460dce90d47STejun Heo /* 146124acfb71SThomas Gleixner * As both pwqs and pools are RCU protected, the 1462dce90d47STejun Heo * following lock operations are safe. 1463dce90d47STejun Heo */ 1464a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pwq->pool->lock); 1465dce90d47STejun Heo put_pwq(pwq); 1466a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pwq->pool->lock); 1467dce90d47STejun Heo } 1468dce90d47STejun Heo } 1469dce90d47STejun Heo 1470bad184d2STejun Heo static bool pwq_is_empty(struct pool_workqueue *pwq) 1471bad184d2STejun Heo { 1472bad184d2STejun Heo return !pwq->nr_active && list_empty(&pwq->inactive_works); 1473bad184d2STejun Heo } 1474bad184d2STejun Heo 14756c592f0bSTejun Heo static void __pwq_activate_work(struct pool_workqueue *pwq, 14766c592f0bSTejun Heo struct work_struct *work) 1477bf4ede01STejun Heo { 14784023a2d9STejun Heo unsigned long *wdb = work_data_bits(work); 14794023a2d9STejun Heo 14804023a2d9STejun Heo WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE)); 1481bf4ede01STejun Heo trace_workqueue_activate_work(work); 148282607adcSTejun Heo if (list_empty(&pwq->pool->worklist)) 148382607adcSTejun Heo pwq->pool->watchdog_ts = jiffies; 1484112202d9STejun Heo move_linked_works(work, &pwq->pool->worklist, NULL); 14854023a2d9STejun Heo __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb); 14866c592f0bSTejun Heo } 14876c592f0bSTejun Heo 14886c592f0bSTejun Heo /** 14896c592f0bSTejun Heo * pwq_activate_work - Activate a work item if inactive 14906c592f0bSTejun Heo * @pwq: pool_workqueue @work belongs to 14916c592f0bSTejun Heo * @work: work item to activate 14926c592f0bSTejun Heo * 14936c592f0bSTejun Heo * Returns %true if activated. %false if already active. 
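 *
 * Unlike __pwq_activate_work() above, this checks WORK_STRUCT_INACTIVE
 * and bumps nr_active itself, so it is safe on a work item whose state
 * isn't known in advance, e.g. from try_to_grab_pending() below.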
14946c592f0bSTejun Heo */ 14956c592f0bSTejun Heo static bool pwq_activate_work(struct pool_workqueue *pwq, 14966c592f0bSTejun Heo struct work_struct *work) 14976c592f0bSTejun Heo { 14986c592f0bSTejun Heo struct worker_pool *pool = pwq->pool; 14996c592f0bSTejun Heo 15006c592f0bSTejun Heo lockdep_assert_held(&pool->lock); 15016c592f0bSTejun Heo 15026c592f0bSTejun Heo if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE)) 15036c592f0bSTejun Heo return false; 15046c592f0bSTejun Heo 1505112202d9STejun Heo pwq->nr_active++; 15066c592f0bSTejun Heo __pwq_activate_work(pwq, work); 15076c592f0bSTejun Heo return true; 1508bf4ede01STejun Heo } 1509bf4ede01STejun Heo 15104023a2d9STejun Heo /** 15114023a2d9STejun Heo * pwq_tryinc_nr_active - Try to increment nr_active for a pwq 15124023a2d9STejun Heo * @pwq: pool_workqueue of interest 15134023a2d9STejun Heo * 15144023a2d9STejun Heo * Try to increment nr_active for @pwq. Returns %true if an nr_active count is 15154023a2d9STejun Heo * successfully obtained. %false otherwise. 15164023a2d9STejun Heo */ 15176741dd3fSGreg Kroah-Hartman static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) 15183aa62497SLai Jiangshan { 15194023a2d9STejun Heo struct workqueue_struct *wq = pwq->wq; 15204023a2d9STejun Heo struct worker_pool *pool = pwq->pool; 15216741dd3fSGreg Kroah-Hartman bool obtained; 15224023a2d9STejun Heo 15234023a2d9STejun Heo lockdep_assert_held(&pool->lock); 15244023a2d9STejun Heo 15254023a2d9STejun Heo obtained = pwq->nr_active < READ_ONCE(wq->max_active); 15265a70baecSTejun Heo 1527bfb429f3SGreg Kroah-Hartman if (obtained) 15285a70baecSTejun Heo pwq->nr_active++; 15294023a2d9STejun Heo return obtained; 15304023a2d9STejun Heo } 15314023a2d9STejun Heo 15324023a2d9STejun Heo /** 15334023a2d9STejun Heo * pwq_activate_first_inactive - Activate the first inactive work item on a pwq 15344023a2d9STejun Heo * @pwq: pool_workqueue of interest 15354023a2d9STejun Heo * 15364023a2d9STejun Heo * Activate the first inactive work item of @pwq if available and allowed by 15374023a2d9STejun Heo * max_active limit. 15384023a2d9STejun Heo * 15394023a2d9STejun Heo * Returns %true if an inactive work item has been activated. %false if no 15404023a2d9STejun Heo * inactive work item is found or max_active limit is reached. 15414023a2d9STejun Heo */ 15426741dd3fSGreg Kroah-Hartman static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) 15434023a2d9STejun Heo { 15444023a2d9STejun Heo struct work_struct *work = 15454023a2d9STejun Heo list_first_entry_or_null(&pwq->inactive_works, 15463aa62497SLai Jiangshan struct work_struct, entry); 15473aa62497SLai Jiangshan 15486741dd3fSGreg Kroah-Hartman if (work && pwq_tryinc_nr_active(pwq)) { 15494023a2d9STejun Heo __pwq_activate_work(pwq, work); 15504023a2d9STejun Heo return true; 15514023a2d9STejun Heo } else { 15524023a2d9STejun Heo return false; 15534023a2d9STejun Heo } 15544023a2d9STejun Heo } 15554023a2d9STejun Heo 15564023a2d9STejun Heo /** 15574023a2d9STejun Heo * pwq_dec_nr_active - Retire an active count 15584023a2d9STejun Heo * @pwq: pool_workqueue of interest 15594023a2d9STejun Heo * 15604023a2d9STejun Heo * Decrement @pwq's nr_active and try to activate the first inactive work item. 
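 *
 * Together with pwq_tryinc_nr_active(), this is what bounds nr_active by
 * max_active: retiring an active slot immediately tries to promote the
 * first inactive work item into the freed slot, if one exists.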
15614023a2d9STejun Heo */ 15624023a2d9STejun Heo static void pwq_dec_nr_active(struct pool_workqueue *pwq) 15634023a2d9STejun Heo { 15644023a2d9STejun Heo struct worker_pool *pool = pwq->pool; 15654023a2d9STejun Heo 15664023a2d9STejun Heo lockdep_assert_held(&pool->lock); 15674023a2d9STejun Heo 15684023a2d9STejun Heo pwq->nr_active--; 15696741dd3fSGreg Kroah-Hartman pwq_activate_first_inactive(pwq); 15703aa62497SLai Jiangshan } 15713aa62497SLai Jiangshan 1572bf4ede01STejun Heo /** 1573112202d9STejun Heo * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1574112202d9STejun Heo * @pwq: pwq of interest 1575c4560c2cSLai Jiangshan * @work_data: work_data of work which left the queue 1576bf4ede01STejun Heo * 1577bf4ede01STejun Heo * A work either has completed or is removed from pending queue, 1578112202d9STejun Heo * decrement nr_in_flight of its pwq and handle workqueue flushing. 1579bf4ede01STejun Heo * 1580bf4ede01STejun Heo * CONTEXT: 1581a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock). 1582bf4ede01STejun Heo */ 1583c4560c2cSLai Jiangshan static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) 1584bf4ede01STejun Heo { 1585c4560c2cSLai Jiangshan int color = get_work_color(work_data); 1586c4560c2cSLai Jiangshan 15874023a2d9STejun Heo if (!(work_data & WORK_STRUCT_INACTIVE)) 15884023a2d9STejun Heo pwq_dec_nr_active(pwq); 1589018f3a13SLai Jiangshan 1590018f3a13SLai Jiangshan pwq->nr_in_flight[color]--; 1591bf4ede01STejun Heo 1592bf4ede01STejun Heo /* is flush in progress and are we at the flushing tip? */ 1593112202d9STejun Heo if (likely(pwq->flush_color != color)) 15948864b4e5STejun Heo goto out_put; 1595bf4ede01STejun Heo 1596bf4ede01STejun Heo /* are there still in-flight works? */ 1597112202d9STejun Heo if (pwq->nr_in_flight[color]) 15988864b4e5STejun Heo goto out_put; 1599bf4ede01STejun Heo 1600112202d9STejun Heo /* this pwq is done, clear flush_color */ 1601112202d9STejun Heo pwq->flush_color = -1; 1602bf4ede01STejun Heo 1603bf4ede01STejun Heo /* 1604112202d9STejun Heo * If this was the last pwq, wake up the first flusher. It 1605bf4ede01STejun Heo * will handle the rest. 1606bf4ede01STejun Heo */ 1607112202d9STejun Heo if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 1608112202d9STejun Heo complete(&pwq->wq->first_flusher->done); 16098864b4e5STejun Heo out_put: 16108864b4e5STejun Heo put_pwq(pwq); 1611bf4ede01STejun Heo } 1612bf4ede01STejun Heo 161336e227d2STejun Heo /** 1614bbb68dfaSTejun Heo * try_to_grab_pending - steal work item from worklist and disable irq 161536e227d2STejun Heo * @work: work item to steal 161636e227d2STejun Heo * @is_dwork: @work is a delayed_work 1617bbb68dfaSTejun Heo * @flags: place to store irq state 161836e227d2STejun Heo * 161936e227d2STejun Heo * Try to grab PENDING bit of @work. This function can handle @work in any 1620d185af30SYacine Belkadi * stable state - idle, on timer or on worklist. 
162136e227d2STejun Heo * 1622d185af30SYacine Belkadi * Return: 16233eb6b31bSMauro Carvalho Chehab * 16243eb6b31bSMauro Carvalho Chehab * ======== ================================================================ 162536e227d2STejun Heo * 1 if @work was pending and we successfully stole PENDING 162636e227d2STejun Heo * 0 if @work was idle and we claimed PENDING 162736e227d2STejun Heo * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1628bbb68dfaSTejun Heo * -ENOENT if someone else is canceling @work, this state may persist 1629bbb68dfaSTejun Heo * for arbitrarily long 16303eb6b31bSMauro Carvalho Chehab * ======== ================================================================ 163136e227d2STejun Heo * 1632d185af30SYacine Belkadi * Note: 1633bbb68dfaSTejun Heo * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1634e0aecdd8STejun Heo * interrupted while holding PENDING and @work off queue, irq must be 1635e0aecdd8STejun Heo * disabled on entry. This, combined with delayed_work->timer being 1636e0aecdd8STejun Heo * irqsafe, ensures that we return -EAGAIN for finite short period of time. 1637bbb68dfaSTejun Heo * 1638bbb68dfaSTejun Heo * On successful return, >= 0, irq is disabled and the caller is 1639bbb68dfaSTejun Heo * responsible for releasing it using local_irq_restore(*@flags). 1640bbb68dfaSTejun Heo * 1641e0aecdd8STejun Heo * This function is safe to call from any context including IRQ handler. 1642bf4ede01STejun Heo */ 1643bbb68dfaSTejun Heo static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1644bbb68dfaSTejun Heo unsigned long *flags) 1645bf4ede01STejun Heo { 1646d565ed63STejun Heo struct worker_pool *pool; 1647112202d9STejun Heo struct pool_workqueue *pwq; 1648bf4ede01STejun Heo 1649bbb68dfaSTejun Heo local_irq_save(*flags); 1650bbb68dfaSTejun Heo 165136e227d2STejun Heo /* try to steal the timer if it exists */ 165236e227d2STejun Heo if (is_dwork) { 165336e227d2STejun Heo struct delayed_work *dwork = to_delayed_work(work); 165436e227d2STejun Heo 1655e0aecdd8STejun Heo /* 1656e0aecdd8STejun Heo * dwork->timer is irqsafe. If del_timer() fails, it's 1657e0aecdd8STejun Heo * guaranteed that the timer is not queued anywhere and not 1658e0aecdd8STejun Heo * running on the local CPU. 1659e0aecdd8STejun Heo */ 166036e227d2STejun Heo if (likely(del_timer(&dwork->timer))) 166136e227d2STejun Heo return 1; 166236e227d2STejun Heo } 166336e227d2STejun Heo 166436e227d2STejun Heo /* try to claim PENDING the normal way */ 1665bf4ede01STejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 1666bf4ede01STejun Heo return 0; 1667bf4ede01STejun Heo 166824acfb71SThomas Gleixner rcu_read_lock(); 1669bf4ede01STejun Heo /* 1670bf4ede01STejun Heo * The queueing is in progress, or it is already queued. Try to 1671bf4ede01STejun Heo * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 1672bf4ede01STejun Heo */ 1673d565ed63STejun Heo pool = get_work_pool(work); 1674d565ed63STejun Heo if (!pool) 1675bbb68dfaSTejun Heo goto fail; 1676bf4ede01STejun Heo 1677a9b8a985SSebastian Andrzej Siewior raw_spin_lock(&pool->lock); 1678bf4ede01STejun Heo /* 1679112202d9STejun Heo * work->data is guaranteed to point to pwq only while the work 1680112202d9STejun Heo * item is queued on pwq->wq, and both updating work->data to point 1681112202d9STejun Heo * to pwq on queueing and to pool on dequeueing are done under 1682112202d9STejun Heo * pwq->pool->lock. 
This in turn guarantees that, if work->data 1683112202d9STejun Heo * points to pwq which is associated with a locked pool, the work 16840b3dae68SLai Jiangshan * item is currently queued on that pool. 1685bf4ede01STejun Heo */ 1686112202d9STejun Heo pwq = get_work_pwq(work); 1687112202d9STejun Heo if (pwq && pwq->pool == pool) { 1688bf4ede01STejun Heo debug_work_deactivate(work); 16893aa62497SLai Jiangshan 16903aa62497SLai Jiangshan /* 1691018f3a13SLai Jiangshan * A cancelable inactive work item must be in the 1692018f3a13SLai Jiangshan * pwq->inactive_works since a queued barrier can't be 1693018f3a13SLai Jiangshan * canceled (see the comments in insert_wq_barrier()). 1694018f3a13SLai Jiangshan * 1695f97a4a1aSLai Jiangshan * An inactive work item cannot be grabbed directly because 1696d812796eSLai Jiangshan * it might have linked barrier work items which, if left 1697f97a4a1aSLai Jiangshan * on the inactive_works list, will confuse pwq->nr_active 169816062836STejun Heo * management later on and cause stall. Make sure the work 169916062836STejun Heo * item is activated before grabbing. 17003aa62497SLai Jiangshan */ 17016c592f0bSTejun Heo pwq_activate_work(pwq, work); 17023aa62497SLai Jiangshan 1703bf4ede01STejun Heo list_del_init(&work->entry); 1704c4560c2cSLai Jiangshan pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); 170536e227d2STejun Heo 1706112202d9STejun Heo /* work->data points to pwq iff queued, point to pool */ 17074468a00fSLai Jiangshan set_work_pool_and_keep_pending(work, pool->id); 17084468a00fSLai Jiangshan 1709a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&pool->lock); 171024acfb71SThomas Gleixner rcu_read_unlock(); 171136e227d2STejun Heo return 1; 1712bf4ede01STejun Heo } 1713a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&pool->lock); 1714bbb68dfaSTejun Heo fail: 171524acfb71SThomas Gleixner rcu_read_unlock(); 1716bbb68dfaSTejun Heo local_irq_restore(*flags); 1717bbb68dfaSTejun Heo if (work_is_canceling(work)) 1718bbb68dfaSTejun Heo return -ENOENT; 1719bbb68dfaSTejun Heo cpu_relax(); 172036e227d2STejun Heo return -EAGAIN; 1721bf4ede01STejun Heo } 1722bf4ede01STejun Heo 1723bf4ede01STejun Heo /** 1724706026c2STejun Heo * insert_work - insert a work into a pool 1725112202d9STejun Heo * @pwq: pwq @work belongs to 17264690c4abSTejun Heo * @work: work to insert 17274690c4abSTejun Heo * @head: insertion point 17284690c4abSTejun Heo * @extra_flags: extra WORK_STRUCT_* flags to set 17294690c4abSTejun Heo * 1730112202d9STejun Heo * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 1731706026c2STejun Heo * work_struct flags. 17324690c4abSTejun Heo * 17334690c4abSTejun Heo * CONTEXT: 1734a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock). 
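 *
 * (All this does is point work->data at @pwq, link @work after @head and
 * take a pwq reference; the matching put_pwq() happens from
 * pwq_dec_nr_in_flight() once the item is retired.)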
1735365970a1SDavid Howells */ 1736112202d9STejun Heo static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 1737112202d9STejun Heo struct list_head *head, unsigned int extra_flags) 1738b89deed3SOleg Nesterov { 1739fe089f87STejun Heo debug_work_activate(work); 1740e1d8aa9fSFrederic Weisbecker 1741e89a85d6SWalter Wu /* record the work call stack in order to print it in KASAN reports */ 1742f70da745SMarco Elver kasan_record_aux_stack_noalloc(work); 1743e89a85d6SWalter Wu 17444690c4abSTejun Heo /* we own @work, set data and link */ 1745112202d9STejun Heo set_work_pwq(work, pwq, extra_flags); 17461a4d9b0aSOleg Nesterov list_add_tail(&work->entry, head); 17478864b4e5STejun Heo get_pwq(pwq); 1748b89deed3SOleg Nesterov } 1749b89deed3SOleg Nesterov 1750c8efcc25STejun Heo /* 1751c8efcc25STejun Heo * Test whether @work is being queued from another work executing on the 17528d03ecfeSTejun Heo * same workqueue. 1753c8efcc25STejun Heo */ 1754c8efcc25STejun Heo static bool is_chained_work(struct workqueue_struct *wq) 1755c8efcc25STejun Heo { 1756c8efcc25STejun Heo struct worker *worker; 1757c8efcc25STejun Heo 17588d03ecfeSTejun Heo worker = current_wq_worker(); 1759c8efcc25STejun Heo /* 1760bf393fd4SBart Van Assche * Return %true iff I'm a worker executing a work item on @wq. If 17618d03ecfeSTejun Heo * I'm @worker, it's safe to dereference it without locking. 1762c8efcc25STejun Heo */ 1763112202d9STejun Heo return worker && worker->current_pwq->wq == wq; 1764c8efcc25STejun Heo } 1765c8efcc25STejun Heo 1766ef557180SMike Galbraith /* 1767ef557180SMike Galbraith * When queueing an unbound work item to a wq, prefer local CPU if allowed 1768ef557180SMike Galbraith * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to 1769ef557180SMike Galbraith * avoid perturbing sensitive tasks. 
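 *
 * When wq_debug_force_rr_cpu is set, the local-CPU fast path below is
 * skipped even for allowed CPUs so that the round-robin path can be
 * exercised in testing.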
1770ef557180SMike Galbraith */ 1771ef557180SMike Galbraith static int wq_select_unbound_cpu(int cpu) 1772ef557180SMike Galbraith { 1773ef557180SMike Galbraith int new_cpu; 1774ef557180SMike Galbraith 1775f303fccbSTejun Heo if (likely(!wq_debug_force_rr_cpu)) { 1776ef557180SMike Galbraith if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) 1777ef557180SMike Galbraith return cpu; 1778a8ec5880SAmmar Faizi } else { 1779a8ec5880SAmmar Faizi pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n"); 1780f303fccbSTejun Heo } 1781f303fccbSTejun Heo 1782ef557180SMike Galbraith new_cpu = __this_cpu_read(wq_rr_cpu_last); 1783ef557180SMike Galbraith new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 1784ef557180SMike Galbraith if (unlikely(new_cpu >= nr_cpu_ids)) { 1785ef557180SMike Galbraith new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 1786ef557180SMike Galbraith if (unlikely(new_cpu >= nr_cpu_ids)) 1787ef557180SMike Galbraith return cpu; 1788ef557180SMike Galbraith } 1789ef557180SMike Galbraith __this_cpu_write(wq_rr_cpu_last, new_cpu); 1790ef557180SMike Galbraith 1791ef557180SMike Galbraith return new_cpu; 1792ef557180SMike Galbraith } 1793ef557180SMike Galbraith 1794d84ff051STejun Heo static void __queue_work(int cpu, struct workqueue_struct *wq, 17951da177e4SLinus Torvalds struct work_struct *work) 17961da177e4SLinus Torvalds { 1797112202d9STejun Heo struct pool_workqueue *pwq; 1798fe089f87STejun Heo struct worker_pool *last_pool, *pool; 17998a2e8e5dSTejun Heo unsigned int work_flags; 1800b75cac93SJoonsoo Kim unsigned int req_cpu = cpu; 18018930cabaSTejun Heo 18028930cabaSTejun Heo /* 18038930cabaSTejun Heo * While a work item is PENDING && off queue, a task trying to 18048930cabaSTejun Heo * steal the PENDING will busy-loop waiting for it to either get 18058930cabaSTejun Heo * queued or lose PENDING. Grabbing PENDING and queueing should 18068930cabaSTejun Heo * happen with IRQ disabled. 18078930cabaSTejun Heo */ 18088e8eb730SFrederic Weisbecker lockdep_assert_irqs_disabled(); 18091da177e4SLinus Torvalds 18101e19ffc6STejun Heo 181133e3f0a3SRichard Clark /* 181233e3f0a3SRichard Clark * For a draining wq, only works from the same workqueue are 181333e3f0a3SRichard Clark * allowed. The __WQ_DESTROYING helps to spot the issue that 181433e3f0a3SRichard Clark * queues a new work item to a wq after destroy_workqueue(wq). 181533e3f0a3SRichard Clark */ 181633e3f0a3SRichard Clark if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && 181733e3f0a3SRichard Clark WARN_ON_ONCE(!is_chained_work(wq)))) 1818e41e704bSTejun Heo return; 181924acfb71SThomas Gleixner rcu_read_lock(); 18209e8cd2f5STejun Heo retry: 1821aa202f1fSHillf Danton /* pwq which will be used unless @work is executing elsewhere */ 1822636b927eSTejun Heo if (req_cpu == WORK_CPU_UNBOUND) { 1823636b927eSTejun Heo if (wq->flags & WQ_UNBOUND) 1824ef557180SMike Galbraith cpu = wq_select_unbound_cpu(raw_smp_processor_id()); 1825636b927eSTejun Heo else 1826aa202f1fSHillf Danton cpu = raw_smp_processor_id(); 1827aa202f1fSHillf Danton } 1828f3421797STejun Heo 1829636b927eSTejun Heo pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu)); 1830fe089f87STejun Heo pool = pwq->pool; 1831fe089f87STejun Heo 183218aa9effSTejun Heo /* 1833c9178087STejun Heo * If @work was previously on a different pool, it might still be 1834c9178087STejun Heo * running there, in which case the work needs to be queued on that 1835c9178087STejun Heo * pool to guarantee non-reentrancy. 
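 *
 * (Concretely: if a worker of last_pool is still running @work for this
 * same @wq, @work is queued back onto that worker's pwq; assign_work()'s
 * collision check then defers it to the running worker, so a work item
 * never runs concurrently with itself.)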
183618aa9effSTejun Heo */ 1837c9e7cf27STejun Heo last_pool = get_work_pool(work); 1838fe089f87STejun Heo if (last_pool && last_pool != pool) { 183918aa9effSTejun Heo struct worker *worker; 184018aa9effSTejun Heo 1841a9b8a985SSebastian Andrzej Siewior raw_spin_lock(&last_pool->lock); 184218aa9effSTejun Heo 1843c9e7cf27STejun Heo worker = find_worker_executing_work(last_pool, work); 184418aa9effSTejun Heo 1845112202d9STejun Heo if (worker && worker->current_pwq->wq == wq) { 1846c9178087STejun Heo pwq = worker->current_pwq; 1847fe089f87STejun Heo pool = pwq->pool; 1848fe089f87STejun Heo WARN_ON_ONCE(pool != last_pool); 18498594fadeSLai Jiangshan } else { 185018aa9effSTejun Heo /* meh... not running there, queue here */ 1851a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&last_pool->lock); 1852fe089f87STejun Heo raw_spin_lock(&pool->lock); 185318aa9effSTejun Heo } 18548930cabaSTejun Heo } else { 1855fe089f87STejun Heo raw_spin_lock(&pool->lock); 18568930cabaSTejun Heo } 1857502ca9d8STejun Heo 18589e8cd2f5STejun Heo /* 1859636b927eSTejun Heo * pwq is determined and locked. For unbound pools, we could have raced 1860636b927eSTejun Heo * with pwq release and it could already be dead. If its refcnt is zero, 1861636b927eSTejun Heo * repeat pwq selection. Note that unbound pwqs never die without 1862636b927eSTejun Heo * another pwq replacing it in cpu_pwq or while work items are executing 1863636b927eSTejun Heo * on it, so the retrying is guaranteed to make forward-progress. 18649e8cd2f5STejun Heo */ 18659e8cd2f5STejun Heo if (unlikely(!pwq->refcnt)) { 18669e8cd2f5STejun Heo if (wq->flags & WQ_UNBOUND) { 1867fe089f87STejun Heo raw_spin_unlock(&pool->lock); 18689e8cd2f5STejun Heo cpu_relax(); 18699e8cd2f5STejun Heo goto retry; 18709e8cd2f5STejun Heo } 18719e8cd2f5STejun Heo /* oops */ 18729e8cd2f5STejun Heo WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 18739e8cd2f5STejun Heo wq->name, cpu); 18749e8cd2f5STejun Heo } 18759e8cd2f5STejun Heo 1876112202d9STejun Heo /* pwq determined, queue */ 1877112202d9STejun Heo trace_workqueue_queue_work(req_cpu, pwq, work); 1878502ca9d8STejun Heo 187924acfb71SThomas Gleixner if (WARN_ON(!list_empty(&work->entry))) 188024acfb71SThomas Gleixner goto out; 18811e19ffc6STejun Heo 1882112202d9STejun Heo pwq->nr_in_flight[pwq->work_color]++; 1883112202d9STejun Heo work_flags = work_color_to_flags(pwq->work_color); 18841e19ffc6STejun Heo 188582e098f5STejun Heo /* 188682e098f5STejun Heo * Limit the number of concurrently active work items to max_active. 188782e098f5STejun Heo * @work must also queue behind existing inactive work items to maintain 188882e098f5STejun Heo * ordering when max_active changes. See wq_adjust_max_active(). 
188982e098f5STejun Heo */ 18906741dd3fSGreg Kroah-Hartman if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) { 1891fe089f87STejun Heo if (list_empty(&pool->worklist)) 1892fe089f87STejun Heo pool->watchdog_ts = jiffies; 1893fe089f87STejun Heo 1894cdadf009STejun Heo trace_workqueue_activate_work(work); 1895fe089f87STejun Heo insert_work(pwq, work, &pool->worklist, work_flags); 18960219a352STejun Heo kick_pool(pool); 18978a2e8e5dSTejun Heo } else { 1898f97a4a1aSLai Jiangshan work_flags |= WORK_STRUCT_INACTIVE; 1899fe089f87STejun Heo insert_work(pwq, work, &pwq->inactive_works, work_flags); 19008a2e8e5dSTejun Heo } 19011e19ffc6STejun Heo 190224acfb71SThomas Gleixner out: 1903fe089f87STejun Heo raw_spin_unlock(&pool->lock); 190424acfb71SThomas Gleixner rcu_read_unlock(); 19051da177e4SLinus Torvalds } 19061da177e4SLinus Torvalds 19070fcb78c2SRolf Eike Beer /** 1908c1a220e7SZhang Rui * queue_work_on - queue work on specific cpu 1909c1a220e7SZhang Rui * @cpu: CPU number to execute work on 1910c1a220e7SZhang Rui * @wq: workqueue to use 1911c1a220e7SZhang Rui * @work: work to queue 1912c1a220e7SZhang Rui * 1913c1a220e7SZhang Rui * We queue the work to a specific CPU, the caller must ensure it 1914443378f0SPaul E. McKenney * can't go away. Callers that fail to ensure that the specified 1915443378f0SPaul E. McKenney * CPU cannot go away will execute on a randomly chosen CPU. 1916854f5cc5SPaul E. McKenney * But note well that callers specifying a CPU that never has been 1917854f5cc5SPaul E. McKenney * online will get a splat. 1918d185af30SYacine Belkadi * 1919d185af30SYacine Belkadi * Return: %false if @work was already on a queue, %true otherwise. 1920c1a220e7SZhang Rui */ 1921d4283e93STejun Heo bool queue_work_on(int cpu, struct workqueue_struct *wq, 1922d4283e93STejun Heo struct work_struct *work) 1923c1a220e7SZhang Rui { 1924d4283e93STejun Heo bool ret = false; 19258930cabaSTejun Heo unsigned long flags; 19268930cabaSTejun Heo 19278930cabaSTejun Heo local_irq_save(flags); 1928c1a220e7SZhang Rui 192922df02bbSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 19304690c4abSTejun Heo __queue_work(cpu, wq, work); 1931d4283e93STejun Heo ret = true; 1932c1a220e7SZhang Rui } 19338930cabaSTejun Heo 19348930cabaSTejun Heo local_irq_restore(flags); 1935c1a220e7SZhang Rui return ret; 1936c1a220e7SZhang Rui } 1937ad7b1f84SMarc Dionne EXPORT_SYMBOL(queue_work_on); 1938c1a220e7SZhang Rui 19398204e0c1SAlexander Duyck /** 1940fef59c9cSTejun Heo * select_numa_node_cpu - Select a CPU based on NUMA node 19418204e0c1SAlexander Duyck * @node: NUMA node ID that we want to select a CPU from 19428204e0c1SAlexander Duyck * 19438204e0c1SAlexander Duyck * This function will attempt to find a "random" cpu available on a given 19448204e0c1SAlexander Duyck * node. If there are no CPUs available on the given node it will return 19458204e0c1SAlexander Duyck * WORK_CPU_UNBOUND indicating that we should just schedule to any 19468204e0c1SAlexander Duyck * available CPU if we need to schedule this work. 
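 *
 * For example, for a node whose CPUs are all offline, this returns
 * WORK_CPU_UNBOUND and __queue_work() falls back to its usual CPU
 * selection.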
19478204e0c1SAlexander Duyck */ 1948fef59c9cSTejun Heo static int select_numa_node_cpu(int node) 19498204e0c1SAlexander Duyck { 19508204e0c1SAlexander Duyck int cpu; 19518204e0c1SAlexander Duyck 19528204e0c1SAlexander Duyck /* Delay binding to CPU if node is not valid or not online */ 19538204e0c1SAlexander Duyck if (node < 0 || node >= MAX_NUMNODES || !node_online(node)) 19548204e0c1SAlexander Duyck return WORK_CPU_UNBOUND; 19558204e0c1SAlexander Duyck 19568204e0c1SAlexander Duyck /* Use local node/cpu if we are already there */ 19578204e0c1SAlexander Duyck cpu = raw_smp_processor_id(); 19588204e0c1SAlexander Duyck if (node == cpu_to_node(cpu)) 19598204e0c1SAlexander Duyck return cpu; 19608204e0c1SAlexander Duyck 19618204e0c1SAlexander Duyck /* Use "random" otherwise known as "first" online CPU of node */ 19628204e0c1SAlexander Duyck cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask); 19638204e0c1SAlexander Duyck 19648204e0c1SAlexander Duyck /* If CPU is valid return that, otherwise just defer */ 19658204e0c1SAlexander Duyck return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND; 19668204e0c1SAlexander Duyck } 19678204e0c1SAlexander Duyck 19688204e0c1SAlexander Duyck /** 19698204e0c1SAlexander Duyck * queue_work_node - queue work on a "random" cpu for a given NUMA node 19708204e0c1SAlexander Duyck * @node: NUMA node that we are targeting the work for 19718204e0c1SAlexander Duyck * @wq: workqueue to use 19728204e0c1SAlexander Duyck * @work: work to queue 19738204e0c1SAlexander Duyck * 19748204e0c1SAlexander Duyck * We queue the work to a "random" CPU within a given NUMA node. The basic 19758204e0c1SAlexander Duyck * idea here is to provide a way to somehow associate work with a given 19768204e0c1SAlexander Duyck * NUMA node. 19778204e0c1SAlexander Duyck * 19788204e0c1SAlexander Duyck * This function will only make a best effort attempt at getting this onto 19798204e0c1SAlexander Duyck * the right NUMA node. If no node is requested or the requested node is 19808204e0c1SAlexander Duyck * offline then we just fall back to standard queue_work behavior. 19818204e0c1SAlexander Duyck * 19828204e0c1SAlexander Duyck * Currently the "random" CPU ends up being the first available CPU in the 19838204e0c1SAlexander Duyck * intersection of cpu_online_mask and the cpumask of the node, unless we 19848204e0c1SAlexander Duyck * are running on the node. In that case we just use the current CPU. 19858204e0c1SAlexander Duyck * 19868204e0c1SAlexander Duyck * Return: %false if @work was already on a queue, %true otherwise. 19878204e0c1SAlexander Duyck */ 19888204e0c1SAlexander Duyck bool queue_work_node(int node, struct workqueue_struct *wq, 19898204e0c1SAlexander Duyck struct work_struct *work) 19908204e0c1SAlexander Duyck { 19918204e0c1SAlexander Duyck unsigned long flags; 19928204e0c1SAlexander Duyck bool ret = false; 19938204e0c1SAlexander Duyck 19948204e0c1SAlexander Duyck /* 19958204e0c1SAlexander Duyck * This current implementation is specific to unbound workqueues. 19968204e0c1SAlexander Duyck * Specifically we only return the first available CPU for a given 19978204e0c1SAlexander Duyck * node instead of cycling through individual CPUs within the node. 19988204e0c1SAlexander Duyck * 19998204e0c1SAlexander Duyck * If this is used with a per-cpu workqueue then the logic in 20008204e0c1SAlexander Duyck * select_numa_node_cpu() would need to be updated to allow for 20018204e0c1SAlexander Duyck * some round robin type logic.
20028204e0c1SAlexander Duyck */ 20038204e0c1SAlexander Duyck WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); 20048204e0c1SAlexander Duyck 20058204e0c1SAlexander Duyck local_irq_save(flags); 20068204e0c1SAlexander Duyck 20078204e0c1SAlexander Duyck if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2008fef59c9cSTejun Heo int cpu = select_numa_node_cpu(node); 20098204e0c1SAlexander Duyck 20108204e0c1SAlexander Duyck __queue_work(cpu, wq, work); 20118204e0c1SAlexander Duyck ret = true; 20128204e0c1SAlexander Duyck } 20138204e0c1SAlexander Duyck 20148204e0c1SAlexander Duyck local_irq_restore(flags); 20158204e0c1SAlexander Duyck return ret; 20168204e0c1SAlexander Duyck } 20178204e0c1SAlexander Duyck EXPORT_SYMBOL_GPL(queue_work_node); 20188204e0c1SAlexander Duyck 20198c20feb6SKees Cook void delayed_work_timer_fn(struct timer_list *t) 20201da177e4SLinus Torvalds { 20218c20feb6SKees Cook struct delayed_work *dwork = from_timer(dwork, t, timer); 20221da177e4SLinus Torvalds 2023e0aecdd8STejun Heo /* should have been called from irqsafe timer with irq already off */ 202460c057bcSLai Jiangshan __queue_work(dwork->cpu, dwork->wq, &dwork->work); 20251da177e4SLinus Torvalds } 20261438ade5SKonstantin Khlebnikov EXPORT_SYMBOL(delayed_work_timer_fn); 20271da177e4SLinus Torvalds 20287beb2edfSTejun Heo static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 202952bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 20301da177e4SLinus Torvalds { 20317beb2edfSTejun Heo struct timer_list *timer = &dwork->timer; 20327beb2edfSTejun Heo struct work_struct *work = &dwork->work; 20331da177e4SLinus Torvalds 2034637fdbaeSTejun Heo WARN_ON_ONCE(!wq); 20354b243563SSami Tolvanen WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 2036fc4b514fSTejun Heo WARN_ON_ONCE(timer_pending(timer)); 2037fc4b514fSTejun Heo WARN_ON_ONCE(!list_empty(&work->entry)); 20387beb2edfSTejun Heo 20398852aac2STejun Heo /* 20408852aac2STejun Heo * If @delay is 0, queue @dwork->work immediately. This is for 20418852aac2STejun Heo * both optimization and correctness. The earliest @timer can 20428852aac2STejun Heo * expire is on the closest next tick and delayed_work users depend 20438852aac2STejun Heo * on that there's no such delay when @delay is 0. 20448852aac2STejun Heo */ 20458852aac2STejun Heo if (!delay) { 20468852aac2STejun Heo __queue_work(cpu, wq, &dwork->work); 20478852aac2STejun Heo return; 20488852aac2STejun Heo } 20498852aac2STejun Heo 205060c057bcSLai Jiangshan dwork->wq = wq; 20511265057fSTejun Heo dwork->cpu = cpu; 20527beb2edfSTejun Heo timer->expires = jiffies + delay; 20537beb2edfSTejun Heo 2054041bd12eSTejun Heo if (unlikely(cpu != WORK_CPU_UNBOUND)) 20557beb2edfSTejun Heo add_timer_on(timer, cpu); 2056041bd12eSTejun Heo else 2057041bd12eSTejun Heo add_timer(timer); 20587beb2edfSTejun Heo } 20591da177e4SLinus Torvalds 20600fcb78c2SRolf Eike Beer /** 20610fcb78c2SRolf Eike Beer * queue_delayed_work_on - queue work on specific CPU after delay 20620fcb78c2SRolf Eike Beer * @cpu: CPU number to execute work on 20630fcb78c2SRolf Eike Beer * @wq: workqueue to use 2064af9997e4SRandy Dunlap * @dwork: work to queue 20650fcb78c2SRolf Eike Beer * @delay: number of jiffies to wait before queueing 20660fcb78c2SRolf Eike Beer * 2067d185af30SYacine Belkadi * Return: %false if @work was already on a queue, %true otherwise. If 2068715f1300STejun Heo * @delay is zero and @dwork is idle, it will be scheduled for immediate 2069715f1300STejun Heo * execution. 
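 *
 * Minimal usage sketch (the names are made up for illustration):
 *
 *	static void poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(poll_work, poll_fn);
 *
 *	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &poll_work, HZ);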
20700fcb78c2SRolf Eike Beer */ 2071d4283e93STejun Heo bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 207252bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 20737a6bc1cdSVenkatesh Pallipadi { 207452bad64dSDavid Howells struct work_struct *work = &dwork->work; 2075d4283e93STejun Heo bool ret = false; 20768930cabaSTejun Heo unsigned long flags; 20778930cabaSTejun Heo 20788930cabaSTejun Heo /* read the comment in __queue_work() */ 20798930cabaSTejun Heo local_irq_save(flags); 20807a6bc1cdSVenkatesh Pallipadi 208122df02bbSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 20827beb2edfSTejun Heo __queue_delayed_work(cpu, wq, dwork, delay); 2083d4283e93STejun Heo ret = true; 20847a6bc1cdSVenkatesh Pallipadi } 20858930cabaSTejun Heo 20868930cabaSTejun Heo local_irq_restore(flags); 20877a6bc1cdSVenkatesh Pallipadi return ret; 20887a6bc1cdSVenkatesh Pallipadi } 2089ad7b1f84SMarc Dionne EXPORT_SYMBOL(queue_delayed_work_on); 20901da177e4SLinus Torvalds 2091c8e55f36STejun Heo /** 20928376fe22STejun Heo * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 20938376fe22STejun Heo * @cpu: CPU number to execute work on 20948376fe22STejun Heo * @wq: workqueue to use 20958376fe22STejun Heo * @dwork: work to queue 20968376fe22STejun Heo * @delay: number of jiffies to wait before queueing 20978376fe22STejun Heo * 20988376fe22STejun Heo * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 20998376fe22STejun Heo * modify @dwork's timer so that it expires after @delay. If @delay is 21008376fe22STejun Heo * zero, @work is guaranteed to be scheduled immediately regardless of its 21018376fe22STejun Heo * current state. 21028376fe22STejun Heo * 2103d185af30SYacine Belkadi * Return: %false if @dwork was idle and queued, %true if @dwork was 21048376fe22STejun Heo * pending and its timer was modified. 21058376fe22STejun Heo * 2106e0aecdd8STejun Heo * This function is safe to call from any context including IRQ handler. 21078376fe22STejun Heo * See try_to_grab_pending() for details. 
21088376fe22STejun Heo */ 21098376fe22STejun Heo bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 21108376fe22STejun Heo struct delayed_work *dwork, unsigned long delay) 21118376fe22STejun Heo { 21128376fe22STejun Heo unsigned long flags; 21138376fe22STejun Heo int ret; 21148376fe22STejun Heo 21158376fe22STejun Heo do { 21168376fe22STejun Heo ret = try_to_grab_pending(&dwork->work, true, &flags); 21178376fe22STejun Heo } while (unlikely(ret == -EAGAIN)); 21188376fe22STejun Heo 21198376fe22STejun Heo if (likely(ret >= 0)) { 21208376fe22STejun Heo __queue_delayed_work(cpu, wq, dwork, delay); 21218376fe22STejun Heo local_irq_restore(flags); 21228376fe22STejun Heo } 21238376fe22STejun Heo 21248376fe22STejun Heo /* -ENOENT from try_to_grab_pending() becomes %true */ 21258376fe22STejun Heo return ret; 21268376fe22STejun Heo } 21278376fe22STejun Heo EXPORT_SYMBOL_GPL(mod_delayed_work_on); 21288376fe22STejun Heo 212905f0fe6bSTejun Heo static void rcu_work_rcufn(struct rcu_head *rcu) 213005f0fe6bSTejun Heo { 213105f0fe6bSTejun Heo struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); 213205f0fe6bSTejun Heo 213305f0fe6bSTejun Heo /* read the comment in __queue_work() */ 213405f0fe6bSTejun Heo local_irq_disable(); 213505f0fe6bSTejun Heo __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); 213605f0fe6bSTejun Heo local_irq_enable(); 213705f0fe6bSTejun Heo } 213805f0fe6bSTejun Heo 213905f0fe6bSTejun Heo /** 214005f0fe6bSTejun Heo * queue_rcu_work - queue work after a RCU grace period 214105f0fe6bSTejun Heo * @wq: workqueue to use 214205f0fe6bSTejun Heo * @rwork: work to queue 214305f0fe6bSTejun Heo * 214405f0fe6bSTejun Heo * Return: %false if @rwork was already pending, %true otherwise. Note 214505f0fe6bSTejun Heo * that a full RCU grace period is guaranteed only after a %true return. 2146bf393fd4SBart Van Assche * While @rwork is guaranteed to be executed after a %false return, the 214705f0fe6bSTejun Heo * execution may happen before a full RCU grace period has passed. 
214805f0fe6bSTejun Heo */ 214905f0fe6bSTejun Heo bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) 215005f0fe6bSTejun Heo { 215105f0fe6bSTejun Heo struct work_struct *work = &rwork->work; 215205f0fe6bSTejun Heo 215305f0fe6bSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 215405f0fe6bSTejun Heo rwork->wq = wq; 2155a7e30c0eSUladzislau Rezki call_rcu_hurry(&rwork->rcu, rcu_work_rcufn); 215605f0fe6bSTejun Heo return true; 215705f0fe6bSTejun Heo } 215805f0fe6bSTejun Heo 215905f0fe6bSTejun Heo return false; 216005f0fe6bSTejun Heo } 216105f0fe6bSTejun Heo EXPORT_SYMBOL(queue_rcu_work); 216205f0fe6bSTejun Heo 2163f7537df5SLai Jiangshan static struct worker *alloc_worker(int node) 2164c34056a3STejun Heo { 2165c34056a3STejun Heo struct worker *worker; 2166c34056a3STejun Heo 2167f7537df5SLai Jiangshan worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); 2168c8e55f36STejun Heo if (worker) { 2169c8e55f36STejun Heo INIT_LIST_HEAD(&worker->entry); 2170affee4b2STejun Heo INIT_LIST_HEAD(&worker->scheduled); 2171da028469SLai Jiangshan INIT_LIST_HEAD(&worker->node); 2172e22bee78STejun Heo /* on creation a worker is in !idle && prep state */ 2173e22bee78STejun Heo worker->flags = WORKER_PREP; 2174c8e55f36STejun Heo } 2175c34056a3STejun Heo return worker; 2176c34056a3STejun Heo } 2177c34056a3STejun Heo 21789546b29eSTejun Heo static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) 21799546b29eSTejun Heo { 21808639ecebSTejun Heo if (pool->cpu < 0 && pool->attrs->affn_strict) 21819546b29eSTejun Heo return pool->attrs->__pod_cpumask; 21828639ecebSTejun Heo else 21838639ecebSTejun Heo return pool->attrs->cpumask; 21849546b29eSTejun Heo } 21859546b29eSTejun Heo 2186c34056a3STejun Heo /** 21874736cbf7SLai Jiangshan * worker_attach_to_pool() - attach a worker to a pool 21884736cbf7SLai Jiangshan * @worker: worker to be attached 21894736cbf7SLai Jiangshan * @pool: the target pool 21904736cbf7SLai Jiangshan * 21914736cbf7SLai Jiangshan * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and 21924736cbf7SLai Jiangshan * cpu-binding of @worker are kept coordinated with the pool across 21934736cbf7SLai Jiangshan * cpu-[un]hotplugs. 21944736cbf7SLai Jiangshan */ 21954736cbf7SLai Jiangshan static void worker_attach_to_pool(struct worker *worker, 21964736cbf7SLai Jiangshan struct worker_pool *pool) 21974736cbf7SLai Jiangshan { 21981258fae7STejun Heo mutex_lock(&wq_pool_attach_mutex); 21994736cbf7SLai Jiangshan 22004736cbf7SLai Jiangshan /* 22011258fae7STejun Heo * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains 22021258fae7STejun Heo * stable across this function. See the comments above the flag 22031258fae7STejun Heo * definition for details. 
22044736cbf7SLai Jiangshan */ 22054736cbf7SLai Jiangshan if (pool->flags & POOL_DISASSOCIATED) 22064736cbf7SLai Jiangshan worker->flags |= WORKER_UNBOUND; 22075c25b5ffSPeter Zijlstra else 22085c25b5ffSPeter Zijlstra kthread_set_per_cpu(worker->task, pool->cpu); 22094736cbf7SLai Jiangshan 2210640f17c8SPeter Zijlstra if (worker->rescue_wq) 22119546b29eSTejun Heo set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); 2212640f17c8SPeter Zijlstra 22134736cbf7SLai Jiangshan list_add_tail(&worker->node, &pool->workers); 2214a2d812a2STejun Heo worker->pool = pool; 22154736cbf7SLai Jiangshan 22161258fae7STejun Heo mutex_unlock(&wq_pool_attach_mutex); 22174736cbf7SLai Jiangshan } 22184736cbf7SLai Jiangshan 22194736cbf7SLai Jiangshan /** 222060f5a4bcSLai Jiangshan * worker_detach_from_pool() - detach a worker from its pool 222160f5a4bcSLai Jiangshan * @worker: worker which is attached to its pool 222260f5a4bcSLai Jiangshan * 22234736cbf7SLai Jiangshan * Undo the attaching which had been done in worker_attach_to_pool(). The 22244736cbf7SLai Jiangshan * caller worker shouldn't access the pool after it is detached unless it 22254736cbf7SLai Jiangshan * holds another reference to the pool. 222660f5a4bcSLai Jiangshan */ 2227a2d812a2STejun Heo static void worker_detach_from_pool(struct worker *worker) 222860f5a4bcSLai Jiangshan { 2229a2d812a2STejun Heo struct worker_pool *pool = worker->pool; 223060f5a4bcSLai Jiangshan struct completion *detach_completion = NULL; 223160f5a4bcSLai Jiangshan 22321258fae7STejun Heo mutex_lock(&wq_pool_attach_mutex); 2233a2d812a2STejun Heo 22345c25b5ffSPeter Zijlstra kthread_set_per_cpu(worker->task, -1); 2235da028469SLai Jiangshan list_del(&worker->node); 2236a2d812a2STejun Heo worker->pool = NULL; 2237a2d812a2STejun Heo 2238e02b9312SValentin Schneider if (list_empty(&pool->workers) && list_empty(&pool->dying_workers)) 223960f5a4bcSLai Jiangshan detach_completion = pool->detach_completion; 22401258fae7STejun Heo mutex_unlock(&wq_pool_attach_mutex); 224160f5a4bcSLai Jiangshan 2242b62c0751SLai Jiangshan /* clear leftover flags without pool->lock after it is detached */ 2243b62c0751SLai Jiangshan worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); 2244b62c0751SLai Jiangshan 224560f5a4bcSLai Jiangshan if (detach_completion) 224660f5a4bcSLai Jiangshan complete(detach_completion); 224760f5a4bcSLai Jiangshan } 224860f5a4bcSLai Jiangshan 224960f5a4bcSLai Jiangshan /** 2250c34056a3STejun Heo * create_worker - create a new workqueue worker 225163d95a91STejun Heo * @pool: pool the new worker will belong to 2252c34056a3STejun Heo * 2253051e1850SLai Jiangshan * Create and start a new worker which is attached to @pool. 2254c34056a3STejun Heo * 2255c34056a3STejun Heo * CONTEXT: 2256c34056a3STejun Heo * Might sleep. Does GFP_KERNEL allocations. 2257c34056a3STejun Heo * 2258d185af30SYacine Belkadi * Return: 2259c34056a3STejun Heo * Pointer to the newly created worker.
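 * %NULL on failure.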
2260c34056a3STejun Heo */ 2261bc2ae0f5STejun Heo static struct worker *create_worker(struct worker_pool *pool) 2262c34056a3STejun Heo { 2263e441b56fSZhen Lei struct worker *worker; 2264e441b56fSZhen Lei int id; 22655d9c7a1eSLucy Mielke char id_buf[23]; 2266c34056a3STejun Heo 22677cda9aaeSLai Jiangshan /* ID is needed to determine kthread name */ 2268e441b56fSZhen Lei id = ida_alloc(&pool->worker_ida, GFP_KERNEL); 22693f0ea0b8SPetr Mladek if (id < 0) { 22703f0ea0b8SPetr Mladek pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n", 22713f0ea0b8SPetr Mladek ERR_PTR(id)); 2272e441b56fSZhen Lei return NULL; 22733f0ea0b8SPetr Mladek } 2274c34056a3STejun Heo 2275f7537df5SLai Jiangshan worker = alloc_worker(pool->node); 22763f0ea0b8SPetr Mladek if (!worker) { 22773f0ea0b8SPetr Mladek pr_err_once("workqueue: Failed to allocate a worker\n"); 2278c34056a3STejun Heo goto fail; 22793f0ea0b8SPetr Mladek } 2280c34056a3STejun Heo 2281c34056a3STejun Heo worker->id = id; 2282c34056a3STejun Heo 228329c91e99STejun Heo if (pool->cpu >= 0) 2284e3c916a4STejun Heo snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, 2285e3c916a4STejun Heo pool->attrs->nice < 0 ? "H" : ""); 2286f3421797STejun Heo else 2287e3c916a4STejun Heo snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); 2288e3c916a4STejun Heo 2289f3f90ad4STejun Heo worker->task = kthread_create_on_node(worker_thread, worker, pool->node, 2290e3c916a4STejun Heo "kworker/%s", id_buf); 22913f0ea0b8SPetr Mladek if (IS_ERR(worker->task)) { 229260f54038SPetr Mladek if (PTR_ERR(worker->task) == -EINTR) { 229360f54038SPetr Mladek pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n", 229460f54038SPetr Mladek id_buf); 229560f54038SPetr Mladek } else { 22963f0ea0b8SPetr Mladek pr_err_once("workqueue: Failed to create a worker thread: %pe", 22973f0ea0b8SPetr Mladek worker->task); 229860f54038SPetr Mladek } 2299c34056a3STejun Heo goto fail; 23003f0ea0b8SPetr Mladek } 2301c34056a3STejun Heo 230291151228SOleg Nesterov set_user_nice(worker->task, pool->attrs->nice); 23039546b29eSTejun Heo kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); 230491151228SOleg Nesterov 2305da028469SLai Jiangshan /* successful, attach the worker to the pool */ 23064736cbf7SLai Jiangshan worker_attach_to_pool(worker, pool); 2307822d8405STejun Heo 2308051e1850SLai Jiangshan /* start the newly created worker */ 2309a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 23100219a352STejun Heo 2311051e1850SLai Jiangshan worker->pool->nr_workers++; 2312051e1850SLai Jiangshan worker_enter_idle(worker); 23130219a352STejun Heo kick_pool(pool); 23140219a352STejun Heo 23150219a352STejun Heo /* 23160219a352STejun Heo * @worker is waiting on a completion in kthread() and will trigger hung 23170219a352STejun Heo * check if not woken up soon. As kick_pool() might not have woken it 23180219a352STejun Heo * up, wake it up explicitly once more.
23190219a352STejun Heo */ 2320051e1850SLai Jiangshan wake_up_process(worker->task); 23210219a352STejun Heo 2322a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2323051e1850SLai Jiangshan 2324c34056a3STejun Heo return worker; 2325822d8405STejun Heo 2326c34056a3STejun Heo fail: 2327e441b56fSZhen Lei ida_free(&pool->worker_ida, id); 2328c34056a3STejun Heo kfree(worker); 2329c34056a3STejun Heo return NULL; 2330c34056a3STejun Heo } 2331c34056a3STejun Heo 2332793777bcSValentin Schneider static void unbind_worker(struct worker *worker) 2333793777bcSValentin Schneider { 2334793777bcSValentin Schneider lockdep_assert_held(&wq_pool_attach_mutex); 2335793777bcSValentin Schneider 2336793777bcSValentin Schneider kthread_set_per_cpu(worker->task, -1); 2337793777bcSValentin Schneider if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) 2338793777bcSValentin Schneider WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); 2339793777bcSValentin Schneider else 2340793777bcSValentin Schneider WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); 2341793777bcSValentin Schneider } 2342793777bcSValentin Schneider 2343e02b9312SValentin Schneider static void wake_dying_workers(struct list_head *cull_list) 2344e02b9312SValentin Schneider { 2345e02b9312SValentin Schneider struct worker *worker, *tmp; 2346e02b9312SValentin Schneider 2347e02b9312SValentin Schneider list_for_each_entry_safe(worker, tmp, cull_list, entry) { 2348e02b9312SValentin Schneider list_del_init(&worker->entry); 2349e02b9312SValentin Schneider unbind_worker(worker); 2350e02b9312SValentin Schneider /* 2351e02b9312SValentin Schneider * If the worker was somehow already running, then it had to be 2352e02b9312SValentin Schneider * in pool->idle_list when set_worker_dying() happened or we 2353e02b9312SValentin Schneider * wouldn't have gotten here. 2354c34056a3STejun Heo * 2355e02b9312SValentin Schneider * Thus, the worker must either have observed the WORKER_DIE 2356e02b9312SValentin Schneider * flag, or have set its state to TASK_IDLE. Either way, the 2357e02b9312SValentin Schneider * below will be observed by the worker and is safe to do 2358e02b9312SValentin Schneider * outside of pool->lock. 2359e02b9312SValentin Schneider */ 2360e02b9312SValentin Schneider wake_up_process(worker->task); 2361e02b9312SValentin Schneider } 2362e02b9312SValentin Schneider } 2363e02b9312SValentin Schneider 2364e02b9312SValentin Schneider /** 2365e02b9312SValentin Schneider * set_worker_dying - Tag a worker for destruction 2366e02b9312SValentin Schneider * @worker: worker to be destroyed 2367e02b9312SValentin Schneider * @list: transfer worker away from its pool->idle_list and into list 2368e02b9312SValentin Schneider * 2369e02b9312SValentin Schneider * Tag @worker for destruction and adjust @pool stats accordingly. The worker 2370e02b9312SValentin Schneider * should be idle. 2371c8e55f36STejun Heo * 2372c8e55f36STejun Heo * CONTEXT: 2373a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock). 
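 *
 * The worker is only tagged and moved onto @list here; the caller is
 * expected to wake it up afterwards via wake_dying_workers(), as
 * idle_cull_fn() does, so that it can run its self-destruct path.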
2374c34056a3STejun Heo */ 2375e02b9312SValentin Schneider static void set_worker_dying(struct worker *worker, struct list_head *list) 2376c34056a3STejun Heo { 2377bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 2378c34056a3STejun Heo 2379cd549687STejun Heo lockdep_assert_held(&pool->lock); 2380e02b9312SValentin Schneider lockdep_assert_held(&wq_pool_attach_mutex); 2381cd549687STejun Heo 2382c34056a3STejun Heo /* sanity check frenzy */ 23836183c009STejun Heo if (WARN_ON(worker->current_work) || 238473eb7fe7SLai Jiangshan WARN_ON(!list_empty(&worker->scheduled)) || 238573eb7fe7SLai Jiangshan WARN_ON(!(worker->flags & WORKER_IDLE))) 23866183c009STejun Heo return; 2387c34056a3STejun Heo 2388bd7bdd43STejun Heo pool->nr_workers--; 2389bd7bdd43STejun Heo pool->nr_idle--; 2390c8e55f36STejun Heo 2391cb444766STejun Heo worker->flags |= WORKER_DIE; 2392e02b9312SValentin Schneider 2393e02b9312SValentin Schneider list_move(&worker->entry, list); 2394e02b9312SValentin Schneider list_move(&worker->node, &pool->dying_workers); 2395c34056a3STejun Heo } 2396c34056a3STejun Heo 23973f959aa3SValentin Schneider /** 23983f959aa3SValentin Schneider * idle_worker_timeout - check if some idle workers can now be deleted. 23993f959aa3SValentin Schneider * @t: The pool's idle_timer that just expired 24003f959aa3SValentin Schneider * 24013f959aa3SValentin Schneider * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in 24023f959aa3SValentin Schneider * worker_leave_idle(), as a worker flicking between idle and active while its 24033f959aa3SValentin Schneider * pool is at the too_many_workers() tipping point would cause too much timer 24043f959aa3SValentin Schneider * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let 24053f959aa3SValentin Schneider * it expire and re-evaluate things from there. 
24063f959aa3SValentin Schneider */ 240732a6c723SKees Cook static void idle_worker_timeout(struct timer_list *t) 2408e22bee78STejun Heo { 240932a6c723SKees Cook struct worker_pool *pool = from_timer(pool, t, idle_timer); 24103f959aa3SValentin Schneider bool do_cull = false; 24113f959aa3SValentin Schneider 24123f959aa3SValentin Schneider if (work_pending(&pool->idle_cull_work)) 24133f959aa3SValentin Schneider return; 24143f959aa3SValentin Schneider 24153f959aa3SValentin Schneider raw_spin_lock_irq(&pool->lock); 24163f959aa3SValentin Schneider 24173f959aa3SValentin Schneider if (too_many_workers(pool)) { 24183f959aa3SValentin Schneider struct worker *worker; 24193f959aa3SValentin Schneider unsigned long expires; 24203f959aa3SValentin Schneider 24213f959aa3SValentin Schneider /* idle_list is kept in LIFO order, check the last one */ 24223f959aa3SValentin Schneider worker = list_entry(pool->idle_list.prev, struct worker, entry); 24233f959aa3SValentin Schneider expires = worker->last_active + IDLE_WORKER_TIMEOUT; 24243f959aa3SValentin Schneider do_cull = !time_before(jiffies, expires); 24253f959aa3SValentin Schneider 24263f959aa3SValentin Schneider if (!do_cull) 24273f959aa3SValentin Schneider mod_timer(&pool->idle_timer, expires); 24283f959aa3SValentin Schneider } 24293f959aa3SValentin Schneider raw_spin_unlock_irq(&pool->lock); 24303f959aa3SValentin Schneider 24313f959aa3SValentin Schneider if (do_cull) 24323f959aa3SValentin Schneider queue_work(system_unbound_wq, &pool->idle_cull_work); 24333f959aa3SValentin Schneider } 24343f959aa3SValentin Schneider 24353f959aa3SValentin Schneider /** 24363f959aa3SValentin Schneider * idle_cull_fn - cull workers that have been idle for too long. 24373f959aa3SValentin Schneider * @work: the pool's work for handling these idle workers 24383f959aa3SValentin Schneider * 24393f959aa3SValentin Schneider * This goes through a pool's idle workers and gets rid of those that have been 24403f959aa3SValentin Schneider * idle for at least IDLE_WORKER_TIMEOUT (5 minutes). 2441e02b9312SValentin Schneider * 2442e02b9312SValentin Schneider * We don't want to disturb isolated CPUs because of a pcpu kworker being 2443e02b9312SValentin Schneider * culled, so this also resets worker affinity. This requires a sleepable 2444e02b9312SValentin Schneider * context, hence the split between timer callback and work item. 24453f959aa3SValentin Schneider */ 24463f959aa3SValentin Schneider static void idle_cull_fn(struct work_struct *work) 24473f959aa3SValentin Schneider { 24483f959aa3SValentin Schneider struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); 24499680540cSYang Yingliang LIST_HEAD(cull_list); 2450e22bee78STejun Heo 2451e02b9312SValentin Schneider /* 2452e02b9312SValentin Schneider * Grabbing wq_pool_attach_mutex here ensures an already-running worker 2453e02b9312SValentin Schneider * cannot proceed beyond worker_detach_from_pool() in its self-destruct 2454e02b9312SValentin Schneider * path. This is required as a previously-preempted worker could run after 2455e02b9312SValentin Schneider * set_worker_dying() has happened but before wake_dying_workers() did.
2456e02b9312SValentin Schneider */ 2457e02b9312SValentin Schneider mutex_lock(&wq_pool_attach_mutex); 2458a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2459e22bee78STejun Heo 24603347fc9fSLai Jiangshan while (too_many_workers(pool)) { 2461e22bee78STejun Heo struct worker *worker; 2462e22bee78STejun Heo unsigned long expires; 2463e22bee78STejun Heo 246463d95a91STejun Heo worker = list_entry(pool->idle_list.prev, struct worker, entry); 2465e22bee78STejun Heo expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2466e22bee78STejun Heo 24673347fc9fSLai Jiangshan if (time_before(jiffies, expires)) { 246863d95a91STejun Heo mod_timer(&pool->idle_timer, expires); 24693347fc9fSLai Jiangshan break; 2470e22bee78STejun Heo } 24713347fc9fSLai Jiangshan 2472e02b9312SValentin Schneider set_worker_dying(worker, &cull_list); 2473e22bee78STejun Heo } 2474e22bee78STejun Heo 2475a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2476e02b9312SValentin Schneider wake_dying_workers(&cull_list); 2477e02b9312SValentin Schneider mutex_unlock(&wq_pool_attach_mutex); 2478e22bee78STejun Heo } 2479e22bee78STejun Heo 2480493a1724STejun Heo static void send_mayday(struct work_struct *work) 2481e22bee78STejun Heo { 2482112202d9STejun Heo struct pool_workqueue *pwq = get_work_pwq(work); 2483112202d9STejun Heo struct workqueue_struct *wq = pwq->wq; 2484493a1724STejun Heo 24852e109a28STejun Heo lockdep_assert_held(&wq_mayday_lock); 2486e22bee78STejun Heo 2487493008a8STejun Heo if (!wq->rescuer) 2488493a1724STejun Heo return; 2489e22bee78STejun Heo 2490e22bee78STejun Heo /* mayday mayday mayday */ 2491493a1724STejun Heo if (list_empty(&pwq->mayday_node)) { 249277668c8bSLai Jiangshan /* 249377668c8bSLai Jiangshan * If @pwq is for an unbound wq, its base ref may be put at 249477668c8bSLai Jiangshan * any time due to an attribute change. Pin @pwq until the 249577668c8bSLai Jiangshan * rescuer is done with it. 249677668c8bSLai Jiangshan */ 249777668c8bSLai Jiangshan get_pwq(pwq); 2498493a1724STejun Heo list_add_tail(&pwq->mayday_node, &wq->maydays); 2499e22bee78STejun Heo wake_up_process(wq->rescuer->task); 2500725e8ec5STejun Heo pwq->stats[PWQ_STAT_MAYDAY]++; 2501493a1724STejun Heo } 2502e22bee78STejun Heo } 2503e22bee78STejun Heo 250432a6c723SKees Cook static void pool_mayday_timeout(struct timer_list *t) 2505e22bee78STejun Heo { 250632a6c723SKees Cook struct worker_pool *pool = from_timer(pool, t, mayday_timer); 2507e22bee78STejun Heo struct work_struct *work; 2508e22bee78STejun Heo 2509a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2510a9b8a985SSebastian Andrzej Siewior raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */ 2511e22bee78STejun Heo 251263d95a91STejun Heo if (need_to_create_worker(pool)) { 2513e22bee78STejun Heo /* 2514e22bee78STejun Heo * We've been trying to create a new worker but 2515e22bee78STejun Heo * haven't been successful. We might be hitting an 2516e22bee78STejun Heo * allocation deadlock. Send distress signals to 2517e22bee78STejun Heo * rescuers. 
2518e22bee78STejun Heo */ 251963d95a91STejun Heo list_for_each_entry(work, &pool->worklist, entry) 2520e22bee78STejun Heo send_mayday(work); 2521e22bee78STejun Heo } 2522e22bee78STejun Heo 2523a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&wq_mayday_lock); 2524a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2525e22bee78STejun Heo 252663d95a91STejun Heo mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 2527e22bee78STejun Heo } 2528e22bee78STejun Heo 2529e22bee78STejun Heo /** 2530e22bee78STejun Heo * maybe_create_worker - create a new worker if necessary 253163d95a91STejun Heo * @pool: pool to create a new worker for 2532e22bee78STejun Heo * 253363d95a91STejun Heo * Create a new worker for @pool if necessary. @pool is guaranteed to 2534e22bee78STejun Heo * have at least one idle worker on return from this function. If 2535e22bee78STejun Heo * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 253663d95a91STejun Heo * sent to all rescuers with works scheduled on @pool to resolve 2537e22bee78STejun Heo * possible allocation deadlock. 2538e22bee78STejun Heo * 2539c5aa87bbSTejun Heo * On return, need_to_create_worker() is guaranteed to be %false and 2540c5aa87bbSTejun Heo * may_start_working() %true. 2541e22bee78STejun Heo * 2542e22bee78STejun Heo * LOCKING: 2543a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2544e22bee78STejun Heo * multiple times. Does GFP_KERNEL allocations. Called only from 2545e22bee78STejun Heo * manager. 2546e22bee78STejun Heo */ 254729187a9eSTejun Heo static void maybe_create_worker(struct worker_pool *pool) 2548d565ed63STejun Heo __releases(&pool->lock) 2549d565ed63STejun Heo __acquires(&pool->lock) 2550e22bee78STejun Heo { 2551e22bee78STejun Heo restart: 2552a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 25539f9c2364STejun Heo 2554e22bee78STejun Heo /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 255563d95a91STejun Heo mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 2556e22bee78STejun Heo 2557e22bee78STejun Heo while (true) { 2558051e1850SLai Jiangshan if (create_worker(pool) || !need_to_create_worker(pool)) 2559e22bee78STejun Heo break; 2560e22bee78STejun Heo 2561e212f361SLai Jiangshan schedule_timeout_interruptible(CREATE_COOLDOWN); 25629f9c2364STejun Heo 256363d95a91STejun Heo if (!need_to_create_worker(pool)) 2564e22bee78STejun Heo break; 2565e22bee78STejun Heo } 2566e22bee78STejun Heo 256763d95a91STejun Heo del_timer_sync(&pool->mayday_timer); 2568a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2569051e1850SLai Jiangshan /* 2570051e1850SLai Jiangshan * This is necessary even after a new worker was just successfully 2571051e1850SLai Jiangshan * created as @pool->lock was dropped and the new worker might have 2572051e1850SLai Jiangshan * already become busy. 2573051e1850SLai Jiangshan */ 257463d95a91STejun Heo if (need_to_create_worker(pool)) 2575e22bee78STejun Heo goto restart; 2576e22bee78STejun Heo } 2577e22bee78STejun Heo 2578e22bee78STejun Heo /** 2579e22bee78STejun Heo * manage_workers - manage worker pool 2580e22bee78STejun Heo * @worker: self 2581e22bee78STejun Heo * 2582706026c2STejun Heo * Assume the manager role and manage the worker pool @worker belongs 2583e22bee78STejun Heo * to. At any given time, there can be only zero or one manager per 2584706026c2STejun Heo * pool. The exclusion is handled automatically by this function. 
2585e22bee78STejun Heo * 2586e22bee78STejun Heo * The caller can safely start processing works on false return. On 2587e22bee78STejun Heo * true return, it's guaranteed that need_to_create_worker() is false 2588e22bee78STejun Heo * and may_start_working() is true. 2589e22bee78STejun Heo * 2590e22bee78STejun Heo * CONTEXT: 2591a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2592e22bee78STejun Heo * multiple times. Does GFP_KERNEL allocations. 2593e22bee78STejun Heo * 2594d185af30SYacine Belkadi * Return: 259529187a9eSTejun Heo * %false if the pool doesn't need management and the caller can safely 259629187a9eSTejun Heo * start processing works, %true if management function was performed and 259729187a9eSTejun Heo * the conditions that the caller verified before calling the function may 259829187a9eSTejun Heo * no longer be true. 2599e22bee78STejun Heo */ 2600e22bee78STejun Heo static bool manage_workers(struct worker *worker) 2601e22bee78STejun Heo { 260263d95a91STejun Heo struct worker_pool *pool = worker->pool; 2603e22bee78STejun Heo 2604692b4825STejun Heo if (pool->flags & POOL_MANAGER_ACTIVE) 260529187a9eSTejun Heo return false; 2606692b4825STejun Heo 2607692b4825STejun Heo pool->flags |= POOL_MANAGER_ACTIVE; 26082607d7a6STejun Heo pool->manager = worker; 2609e22bee78STejun Heo 261029187a9eSTejun Heo maybe_create_worker(pool); 2611e22bee78STejun Heo 26122607d7a6STejun Heo pool->manager = NULL; 2613692b4825STejun Heo pool->flags &= ~POOL_MANAGER_ACTIVE; 2614d8bb65abSSebastian Andrzej Siewior rcuwait_wake_up(&manager_wait); 261529187a9eSTejun Heo return true; 2616e22bee78STejun Heo } 2617e22bee78STejun Heo 2618a62428c0STejun Heo /** 2619a62428c0STejun Heo * process_one_work - process a single work 2620c34056a3STejun Heo * @worker: self 2621a62428c0STejun Heo * @work: work to process 2622a62428c0STejun Heo * 2623a62428c0STejun Heo * Process @work. This function contains all the logic necessary to 2624a62428c0STejun Heo * process a single work including synchronization against and 2625a62428c0STejun Heo * interaction with other workers on the same cpu, queueing and 2626a62428c0STejun Heo * flushing. As long as the context requirement is met, any worker can 2627a62428c0STejun Heo * call this function to process a work. 2628a62428c0STejun Heo * 2629a62428c0STejun Heo * CONTEXT: 2630a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock) which is released and regrabbed. 2631a62428c0STejun Heo */ 2632c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work) 2633d565ed63STejun Heo __releases(&pool->lock) 2634d565ed63STejun Heo __acquires(&pool->lock) 26351da177e4SLinus Torvalds { 2636112202d9STejun Heo struct pool_workqueue *pwq = get_work_pwq(work); 2637bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 2638c4560c2cSLai Jiangshan unsigned long work_data; 26394e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP 26404e6045f1SJohannes Berg /* 2641a62428c0STejun Heo * It is permissible to free the struct work_struct from 2642a62428c0STejun Heo * inside the function that is called from it, which we need to 2643a62428c0STejun Heo * take into account for lockdep too. To avoid bogus "held 2644a62428c0STejun Heo * lock freed" warnings as well as problems when looking into 2645a62428c0STejun Heo * work->lockdep_map, make a copy and use that here.
26464e6045f1SJohannes Berg */ 26474d82a1deSPeter Zijlstra struct lockdep_map lockdep_map; 26484d82a1deSPeter Zijlstra 26494d82a1deSPeter Zijlstra lockdep_copy_map(&lockdep_map, &work->lockdep_map); 26504e6045f1SJohannes Berg #endif 2651807407c0SLai Jiangshan /* ensure we're on the correct CPU */ 265285327af6SLai Jiangshan WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 2653ec22ca5eSTejun Heo raw_smp_processor_id() != pool->cpu); 265425511a47STejun Heo 26558930cabaSTejun Heo /* claim and dequeue */ 2656dc186ad7SThomas Gleixner debug_work_deactivate(work); 2657c9e7cf27STejun Heo hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 2658c34056a3STejun Heo worker->current_work = work; 2659a2c1c57bSTejun Heo worker->current_func = work->func; 2660112202d9STejun Heo worker->current_pwq = pwq; 2661616db877STejun Heo worker->current_at = worker->task->se.sum_exec_runtime; 2662c4560c2cSLai Jiangshan work_data = *work_data_bits(work); 2663d812796eSLai Jiangshan worker->current_color = get_work_color(work_data); 26647a22ad75STejun Heo 26658bf89593STejun Heo /* 26668bf89593STejun Heo * Record wq name for cmdline and debug reporting, may get 26678bf89593STejun Heo * overridden through set_worker_desc(). 26688bf89593STejun Heo */ 26698bf89593STejun Heo strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); 26708bf89593STejun Heo 2671a62428c0STejun Heo list_del_init(&work->entry); 2672a62428c0STejun Heo 2673649027d7STejun Heo /* 2674228f1d00SLai Jiangshan * CPU intensive works don't participate in concurrency management. 2675228f1d00SLai Jiangshan * They're the scheduler's responsibility. This takes @worker out 2676228f1d00SLai Jiangshan * of concurrency management and the next code block will chain 2677228f1d00SLai Jiangshan * execution of the pending work items. 2678fb0e7bebSTejun Heo */ 2679616db877STejun Heo if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE)) 2680228f1d00SLai Jiangshan worker_set_flags(worker, WORKER_CPU_INTENSIVE); 2681fb0e7bebSTejun Heo 2682974271c4STejun Heo /* 26830219a352STejun Heo * Kick @pool if necessary. It's always noop for per-cpu worker pools 26840219a352STejun Heo * since nr_running would always be >= 1 at this point. This is used to 26850219a352STejun Heo * chain execution of the pending work items for WORKER_NOT_RUNNING 26860219a352STejun Heo * workers such as the UNBOUND and CPU_INTENSIVE ones. 2687974271c4STejun Heo */ 26880219a352STejun Heo kick_pool(pool); 2689974271c4STejun Heo 26908930cabaSTejun Heo /* 26917c3eed5cSTejun Heo * Record the last pool and clear PENDING which should be the last 2692d565ed63STejun Heo * update to @work. Also, do this inside @pool->lock so that 269323657bb1STejun Heo * PENDING and queued state changes happen together while IRQ is 269423657bb1STejun Heo * disabled. 26958930cabaSTejun Heo */ 26967c3eed5cSTejun Heo set_work_pool_and_clear_pending(work, pool->id); 26971da177e4SLinus Torvalds 2698fe48ba7dSMirsad Goran Todorovac pwq->stats[PWQ_STAT_STARTED]++; 2699a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2700365970a1SDavid Howells 2701a1d14934SPeter Zijlstra lock_map_acquire(&pwq->wq->lockdep_map); 27023295f0efSIngo Molnar lock_map_acquire(&lockdep_map); 2703e6f3faa7SPeter Zijlstra /* 2704f52be570SPeter Zijlstra * Strictly speaking we should mark the invariant state without holding 2705f52be570SPeter Zijlstra * any locks, that is, before these two lock_map_acquire()'s. 
2706e6f3faa7SPeter Zijlstra * 2707e6f3faa7SPeter Zijlstra * However, that would result in: 2708e6f3faa7SPeter Zijlstra * 2709e6f3faa7SPeter Zijlstra * A(W1) 2710e6f3faa7SPeter Zijlstra * WFC(C) 2711e6f3faa7SPeter Zijlstra * A(W1) 2712e6f3faa7SPeter Zijlstra * C(C) 2713e6f3faa7SPeter Zijlstra * 2714e6f3faa7SPeter Zijlstra * Which would create W1->C->W1 dependencies, even though there is no 2715e6f3faa7SPeter Zijlstra * actual deadlock possible. There are two solutions, using a 2716e6f3faa7SPeter Zijlstra * read-recursive acquire on the work(queue) 'locks', but this will then 2717f52be570SPeter Zijlstra * hit the lockdep limitation on recursive locks, or simply discard 2718e6f3faa7SPeter Zijlstra * these locks. 2719e6f3faa7SPeter Zijlstra * 2720e6f3faa7SPeter Zijlstra * AFAICT there is no possible deadlock scenario between the 2721e6f3faa7SPeter Zijlstra * flush_work() and complete() primitives (except for single-threaded 2722e6f3faa7SPeter Zijlstra * workqueues), so hiding them isn't a problem. 2723e6f3faa7SPeter Zijlstra */ 2724f52be570SPeter Zijlstra lockdep_invariant_state(true); 2725e36c886aSArjan van de Ven trace_workqueue_execute_start(work); 2726a2c1c57bSTejun Heo worker->current_func(work); 2727e36c886aSArjan van de Ven /* 2728e36c886aSArjan van de Ven * While we must be careful to not use "work" after this, the trace 2729e36c886aSArjan van de Ven * point will only record its address. 2730e36c886aSArjan van de Ven */ 27311c5da0ecSDaniel Jordan trace_workqueue_execute_end(work, worker->current_func); 2732725e8ec5STejun Heo pwq->stats[PWQ_STAT_COMPLETED]++; 27333295f0efSIngo Molnar lock_map_release(&lockdep_map); 2734112202d9STejun Heo lock_map_release(&pwq->wq->lockdep_map); 27351da177e4SLinus Torvalds 2736d5abe669SPeter Zijlstra if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2737044c782cSValentin Ilie pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2738d75f773cSSakari Ailus " last function: %ps\n", 2739a2c1c57bSTejun Heo current->comm, preempt_count(), task_pid_nr(current), 2740a2c1c57bSTejun Heo worker->current_func); 2741d5abe669SPeter Zijlstra debug_show_held_locks(current); 2742d5abe669SPeter Zijlstra dump_stack(); 2743d5abe669SPeter Zijlstra } 2744d5abe669SPeter Zijlstra 2745b22ce278STejun Heo /* 2746025f50f3SSebastian Andrzej Siewior * The following prevents a kworker from hogging CPU on !PREEMPTION 2747b22ce278STejun Heo * kernels, where a requeueing work item waiting for something to 2748b22ce278STejun Heo * happen could deadlock with stop_machine as such work item could 2749b22ce278STejun Heo * indefinitely requeue itself while all other CPUs are trapped in 2750789cbbecSJoe Lawrence * stop_machine. At the same time, report a quiescent RCU state so 2751789cbbecSJoe Lawrence * the same condition doesn't freeze RCU. 2752b22ce278STejun Heo */ 2753a7e6425eSPaul E. McKenney cond_resched(); 2754b22ce278STejun Heo 2755a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2756a62428c0STejun Heo 2757616db877STejun Heo /* 2758616db877STejun Heo * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked 2759616db877STejun Heo * CPU intensive by wq_worker_tick() if @work hogged CPU longer than 2760616db877STejun Heo * wq_cpu_intensive_thresh_us. Clear it. 
2761616db877STejun Heo */ 2762fb0e7bebSTejun Heo worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2763fb0e7bebSTejun Heo 27641b69ac6bSJohannes Weiner /* tag the worker for identification in schedule() */ 27651b69ac6bSJohannes Weiner worker->last_func = worker->current_func; 27661b69ac6bSJohannes Weiner 2767a62428c0STejun Heo /* we're done with it, release */ 276842f8570fSSasha Levin hash_del(&worker->hentry); 2769c34056a3STejun Heo worker->current_work = NULL; 2770a2c1c57bSTejun Heo worker->current_func = NULL; 2771112202d9STejun Heo worker->current_pwq = NULL; 2772d812796eSLai Jiangshan worker->current_color = INT_MAX; 2773c4560c2cSLai Jiangshan pwq_dec_nr_in_flight(pwq, work_data); 27741da177e4SLinus Torvalds } 27751da177e4SLinus Torvalds 2776affee4b2STejun Heo /** 2777affee4b2STejun Heo * process_scheduled_works - process scheduled works 2778affee4b2STejun Heo * @worker: self 2779affee4b2STejun Heo * 2780affee4b2STejun Heo * Process all scheduled works. Please note that the scheduled list 2781affee4b2STejun Heo * may change while processing a work, so this function repeatedly 2782affee4b2STejun Heo * fetches a work from the top and executes it. 2783affee4b2STejun Heo * 2784affee4b2STejun Heo * CONTEXT: 2785a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2786affee4b2STejun Heo * multiple times. 2787affee4b2STejun Heo */ 2788affee4b2STejun Heo static void process_scheduled_works(struct worker *worker) 27891da177e4SLinus Torvalds { 2790c0ab017dSTejun Heo struct work_struct *work; 2791c0ab017dSTejun Heo bool first = true; 2792c0ab017dSTejun Heo 2793c0ab017dSTejun Heo while ((work = list_first_entry_or_null(&worker->scheduled, 2794c0ab017dSTejun Heo struct work_struct, entry))) { 2795c0ab017dSTejun Heo if (first) { 2796c0ab017dSTejun Heo worker->pool->watchdog_ts = jiffies; 2797c0ab017dSTejun Heo first = false; 2798c0ab017dSTejun Heo } 2799c34056a3STejun Heo process_one_work(worker, work); 2800a62428c0STejun Heo } 28011da177e4SLinus Torvalds } 28021da177e4SLinus Torvalds 2803197f6accSTejun Heo static void set_pf_worker(bool val) 2804197f6accSTejun Heo { 2805197f6accSTejun Heo mutex_lock(&wq_pool_attach_mutex); 2806197f6accSTejun Heo if (val) 2807197f6accSTejun Heo current->flags |= PF_WQ_WORKER; 2808197f6accSTejun Heo else 2809197f6accSTejun Heo current->flags &= ~PF_WQ_WORKER; 2810197f6accSTejun Heo mutex_unlock(&wq_pool_attach_mutex); 2811197f6accSTejun Heo } 2812197f6accSTejun Heo 28134690c4abSTejun Heo /** 28144690c4abSTejun Heo * worker_thread - the worker thread function 2815c34056a3STejun Heo * @__worker: self 28164690c4abSTejun Heo * 2817c5aa87bbSTejun Heo * The worker thread function. All workers belong to a worker_pool - 2818c5aa87bbSTejun Heo * either a per-cpu one or dynamic unbound one. These workers process all 2819c5aa87bbSTejun Heo * work items regardless of their specific target workqueue. The only 2820c5aa87bbSTejun Heo * exception is work items which belong to workqueues with a rescuer which 2821c5aa87bbSTejun Heo * will be explained in rescuer_thread(). 
2822d185af30SYacine Belkadi * 2823d185af30SYacine Belkadi * Return: 0 28244690c4abSTejun Heo */ 2825c34056a3STejun Heo static int worker_thread(void *__worker) 28261da177e4SLinus Torvalds { 2827c34056a3STejun Heo struct worker *worker = __worker; 2828bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 28291da177e4SLinus Torvalds 2830e22bee78STejun Heo /* tell the scheduler that this is a workqueue worker */ 2831197f6accSTejun Heo set_pf_worker(true); 2832c8e55f36STejun Heo woke_up: 2833a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2834affee4b2STejun Heo 2835a9ab775bSTejun Heo /* am I supposed to die? */ 2836a9ab775bSTejun Heo if (unlikely(worker->flags & WORKER_DIE)) { 2837a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2838197f6accSTejun Heo set_pf_worker(false); 283960f5a4bcSLai Jiangshan 284060f5a4bcSLai Jiangshan set_task_comm(worker->task, "kworker/dying"); 2841e441b56fSZhen Lei ida_free(&pool->worker_ida, worker->id); 2842a2d812a2STejun Heo worker_detach_from_pool(worker); 2843e02b9312SValentin Schneider WARN_ON_ONCE(!list_empty(&worker->entry)); 284460f5a4bcSLai Jiangshan kfree(worker); 2845c8e55f36STejun Heo return 0; 2846c8e55f36STejun Heo } 2847c8e55f36STejun Heo 2848c8e55f36STejun Heo worker_leave_idle(worker); 2849db7bccf4STejun Heo recheck: 2850e22bee78STejun Heo /* no more worker necessary? */ 285163d95a91STejun Heo if (!need_more_worker(pool)) 2852e22bee78STejun Heo goto sleep; 2853e22bee78STejun Heo 2854e22bee78STejun Heo /* do we need to manage? */ 285563d95a91STejun Heo if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2856e22bee78STejun Heo goto recheck; 2857e22bee78STejun Heo 2858c8e55f36STejun Heo /* 2859c8e55f36STejun Heo * ->scheduled list can only be filled while a worker is 2860c8e55f36STejun Heo * preparing to process a work or actually processing it. 2861c8e55f36STejun Heo * Make sure nobody diddled with it while I was sleeping. 2862c8e55f36STejun Heo */ 28636183c009STejun Heo WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2864c8e55f36STejun Heo 2865e22bee78STejun Heo /* 2866a9ab775bSTejun Heo * Finish PREP stage. We're guaranteed to have at least one idle 2867a9ab775bSTejun Heo * worker or that someone else has already assumed the manager 2868a9ab775bSTejun Heo * role. This is where @worker starts participating in concurrency 2869a9ab775bSTejun Heo * management if applicable and concurrency management is restored 2870a9ab775bSTejun Heo * after being rebound. See rebind_workers() for details. 2871e22bee78STejun Heo */ 2872a9ab775bSTejun Heo worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 2873e22bee78STejun Heo 2874e22bee78STejun Heo do { 2875affee4b2STejun Heo struct work_struct *work = 2876bd7bdd43STejun Heo list_first_entry(&pool->worklist, 2877affee4b2STejun Heo struct work_struct, entry); 2878affee4b2STejun Heo 2879873eaca6STejun Heo if (assign_work(work, worker, NULL)) 2880affee4b2STejun Heo process_scheduled_works(worker); 288163d95a91STejun Heo } while (keep_working(pool)); 2882affee4b2STejun Heo 2883228f1d00SLai Jiangshan worker_set_flags(worker, WORKER_PREP); 2884d313dd85STejun Heo sleep: 2885c8e55f36STejun Heo /* 2886d565ed63STejun Heo * pool->lock is held and there's no work to process and no need to 2887d565ed63STejun Heo * manage, sleep. Workers are woken up only while holding 2888d565ed63STejun Heo * pool->lock or from local cpu, so setting the current state 2889d565ed63STejun Heo * before releasing pool->lock is enough to prevent losing any 2890d565ed63STejun Heo * event. 
2891c8e55f36STejun Heo */ 2892c8e55f36STejun Heo worker_enter_idle(worker); 2893c5a94a61SPeter Zijlstra __set_current_state(TASK_IDLE); 2894a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 28951da177e4SLinus Torvalds schedule(); 2896c8e55f36STejun Heo goto woke_up; 28971da177e4SLinus Torvalds } 28981da177e4SLinus Torvalds 2899e22bee78STejun Heo /** 2900e22bee78STejun Heo * rescuer_thread - the rescuer thread function 2901111c225aSTejun Heo * @__rescuer: self 2902e22bee78STejun Heo * 2903e22bee78STejun Heo * Workqueue rescuer thread function. There's one rescuer for each 2904493008a8STejun Heo * workqueue which has WQ_MEM_RECLAIM set. 2905e22bee78STejun Heo * 2906706026c2STejun Heo * Regular work processing on a pool may block trying to create a new 2907e22bee78STejun Heo * worker which uses GFP_KERNEL allocation which has slight chance of 2908e22bee78STejun Heo * developing into deadlock if some works currently on the same queue 2909e22bee78STejun Heo * need to be processed to satisfy the GFP_KERNEL allocation. This is 2910e22bee78STejun Heo * the problem rescuer solves. 2911e22bee78STejun Heo * 2912706026c2STejun Heo * When such condition is possible, the pool summons rescuers of all 2913706026c2STejun Heo * workqueues which have works queued on the pool and let them process 2914e22bee78STejun Heo * those works so that forward progress can be guaranteed. 2915e22bee78STejun Heo * 2916e22bee78STejun Heo * This should happen rarely. 2917d185af30SYacine Belkadi * 2918d185af30SYacine Belkadi * Return: 0 2919e22bee78STejun Heo */ 2920111c225aSTejun Heo static int rescuer_thread(void *__rescuer) 2921e22bee78STejun Heo { 2922111c225aSTejun Heo struct worker *rescuer = __rescuer; 2923111c225aSTejun Heo struct workqueue_struct *wq = rescuer->rescue_wq; 29244d595b86SLai Jiangshan bool should_stop; 2925e22bee78STejun Heo 2926e22bee78STejun Heo set_user_nice(current, RESCUER_NICE_LEVEL); 2927111c225aSTejun Heo 2928111c225aSTejun Heo /* 2929111c225aSTejun Heo * Mark rescuer as worker too. As WORKER_PREP is never cleared, it 2930111c225aSTejun Heo * doesn't participate in concurrency management. 2931111c225aSTejun Heo */ 2932197f6accSTejun Heo set_pf_worker(true); 2933e22bee78STejun Heo repeat: 2934c5a94a61SPeter Zijlstra set_current_state(TASK_IDLE); 29351da177e4SLinus Torvalds 29364d595b86SLai Jiangshan /* 29374d595b86SLai Jiangshan * By the time the rescuer is requested to stop, the workqueue 29384d595b86SLai Jiangshan * shouldn't have any work pending, but @wq->maydays may still have 29394d595b86SLai Jiangshan * pwq(s) queued. This can happen by non-rescuer workers consuming 29404d595b86SLai Jiangshan * all the work items before the rescuer got to them. Go through 29414d595b86SLai Jiangshan * @wq->maydays processing before acting on should_stop so that the 29424d595b86SLai Jiangshan * list is always empty on exit. 
29434d595b86SLai Jiangshan */ 29444d595b86SLai Jiangshan should_stop = kthread_should_stop(); 29451da177e4SLinus Torvalds 2946493a1724STejun Heo /* see whether any pwq is asking for help */ 2947a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&wq_mayday_lock); 2948493a1724STejun Heo 2949493a1724STejun Heo while (!list_empty(&wq->maydays)) { 2950493a1724STejun Heo struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2951493a1724STejun Heo struct pool_workqueue, mayday_node); 2952112202d9STejun Heo struct worker_pool *pool = pwq->pool; 2953e22bee78STejun Heo struct work_struct *work, *n; 2954e22bee78STejun Heo 2955e22bee78STejun Heo __set_current_state(TASK_RUNNING); 2956493a1724STejun Heo list_del_init(&pwq->mayday_node); 2957493a1724STejun Heo 2958a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&wq_mayday_lock); 2959e22bee78STejun Heo 296051697d39SLai Jiangshan worker_attach_to_pool(rescuer, pool); 296151697d39SLai Jiangshan 2962a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2963e22bee78STejun Heo 2964e22bee78STejun Heo /* 2965e22bee78STejun Heo * Slurp in all works issued via this workqueue and 2966e22bee78STejun Heo * process'em. 2967e22bee78STejun Heo */ 2968873eaca6STejun Heo WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 296982607adcSTejun Heo list_for_each_entry_safe(work, n, &pool->worklist, entry) { 2970873eaca6STejun Heo if (get_work_pwq(work) == pwq && 2971873eaca6STejun Heo assign_work(work, rescuer, &n)) 2972725e8ec5STejun Heo pwq->stats[PWQ_STAT_RESCUED]++; 297382607adcSTejun Heo } 2974e22bee78STejun Heo 2975873eaca6STejun Heo if (!list_empty(&rescuer->scheduled)) { 2976e22bee78STejun Heo process_scheduled_works(rescuer); 29777576958aSTejun Heo 29787576958aSTejun Heo /* 2979008847f6SNeilBrown * The above execution of rescued work items could 2980008847f6SNeilBrown * have created more to rescue through 2981f97a4a1aSLai Jiangshan * pwq_activate_first_inactive() or chained 2982008847f6SNeilBrown * queueing. Let's put @pwq back on the mayday list so 2983008847f6SNeilBrown * that such back-to-back work items, which may be 2984008847f6SNeilBrown * being used to relieve memory pressure, don't 2985008847f6SNeilBrown * incur MAYDAY_INTERVAL delay in between. 2986008847f6SNeilBrown */ 29874f3f4cf3SLai Jiangshan if (pwq->nr_active && need_to_create_worker(pool)) { 2988a9b8a985SSebastian Andrzej Siewior raw_spin_lock(&wq_mayday_lock); 2989e66b39afSTejun Heo /* 2990e66b39afSTejun Heo * Queue iff we aren't racing destruction 2991e66b39afSTejun Heo * and somebody else hasn't queued it already. 2992e66b39afSTejun Heo */ 2993e66b39afSTejun Heo if (wq->rescuer && list_empty(&pwq->mayday_node)) { 2994008847f6SNeilBrown get_pwq(pwq); 2995e66b39afSTejun Heo list_add_tail(&pwq->mayday_node, &wq->maydays); 2996e66b39afSTejun Heo } 2997a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&wq_mayday_lock); 2998008847f6SNeilBrown } 2999008847f6SNeilBrown } 3000008847f6SNeilBrown 3001008847f6SNeilBrown /* 300277668c8bSLai Jiangshan * Put the reference grabbed by send_mayday(). @pool won't 300313b1d625SLai Jiangshan * go away while we're still attached to it. 300477668c8bSLai Jiangshan */ 300577668c8bSLai Jiangshan put_pwq(pwq); 300677668c8bSLai Jiangshan 300777668c8bSLai Jiangshan /* 30080219a352STejun Heo * Leave this pool. Notify regular workers; otherwise, we end up 30090219a352STejun Heo * with 0 concurrency and stalling the execution.
30107576958aSTejun Heo */ 30110219a352STejun Heo kick_pool(pool); 30127576958aSTejun Heo 3013a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 301413b1d625SLai Jiangshan 3015a2d812a2STejun Heo worker_detach_from_pool(rescuer); 301613b1d625SLai Jiangshan 3017a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&wq_mayday_lock); 30181da177e4SLinus Torvalds } 30191da177e4SLinus Torvalds 3020a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&wq_mayday_lock); 3021493a1724STejun Heo 30224d595b86SLai Jiangshan if (should_stop) { 30234d595b86SLai Jiangshan __set_current_state(TASK_RUNNING); 3024197f6accSTejun Heo set_pf_worker(false); 30254d595b86SLai Jiangshan return 0; 30264d595b86SLai Jiangshan } 30274d595b86SLai Jiangshan 3028111c225aSTejun Heo /* rescuers should never participate in concurrency management */ 3029111c225aSTejun Heo WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 3030e22bee78STejun Heo schedule(); 3031e22bee78STejun Heo goto repeat; 30321da177e4SLinus Torvalds } 30331da177e4SLinus Torvalds 3034fca839c0STejun Heo /** 3035fca839c0STejun Heo * check_flush_dependency - check for flush dependency sanity 3036fca839c0STejun Heo * @target_wq: workqueue being flushed 3037fca839c0STejun Heo * @target_work: work item being flushed (NULL for workqueue flushes) 3038fca839c0STejun Heo * 3039fca839c0STejun Heo * %current is trying to flush the whole @target_wq or @target_work on it. 3040fca839c0STejun Heo * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not 3041fca839c0STejun Heo * reclaiming memory or running on a workqueue which doesn't have 3042fca839c0STejun Heo * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to 3043fca839c0STejun Heo * a deadlock. 3044fca839c0STejun Heo */ 3045fca839c0STejun Heo static void check_flush_dependency(struct workqueue_struct *target_wq, 3046fca839c0STejun Heo struct work_struct *target_work) 3047fca839c0STejun Heo { 3048fca839c0STejun Heo work_func_t target_func = target_work ? 
target_work->func : NULL; 3049fca839c0STejun Heo struct worker *worker; 3050fca839c0STejun Heo 3051fca839c0STejun Heo if (target_wq->flags & WQ_MEM_RECLAIM) 3052fca839c0STejun Heo return; 3053fca839c0STejun Heo 3054fca839c0STejun Heo worker = current_wq_worker(); 3055fca839c0STejun Heo 3056fca839c0STejun Heo WARN_ONCE(current->flags & PF_MEMALLOC, 3057d75f773cSSakari Ailus "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps", 3058fca839c0STejun Heo current->pid, current->comm, target_wq->name, target_func); 305923d11a58STejun Heo WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 306023d11a58STejun Heo (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 3061d75f773cSSakari Ailus "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps", 3062fca839c0STejun Heo worker->current_pwq->wq->name, worker->current_func, 3063fca839c0STejun Heo target_wq->name, target_func); 3064fca839c0STejun Heo } 3065fca839c0STejun Heo 3066fc2e4d70SOleg Nesterov struct wq_barrier { 3067fc2e4d70SOleg Nesterov struct work_struct work; 3068fc2e4d70SOleg Nesterov struct completion done; 30692607d7a6STejun Heo struct task_struct *task; /* purely informational */ 3070fc2e4d70SOleg Nesterov }; 3071fc2e4d70SOleg Nesterov 3072fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work) 3073fc2e4d70SOleg Nesterov { 3074fc2e4d70SOleg Nesterov struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 3075fc2e4d70SOleg Nesterov complete(&barr->done); 3076fc2e4d70SOleg Nesterov } 3077fc2e4d70SOleg Nesterov 30784690c4abSTejun Heo /** 30794690c4abSTejun Heo * insert_wq_barrier - insert a barrier work 3080112202d9STejun Heo * @pwq: pwq to insert barrier into 30814690c4abSTejun Heo * @barr: wq_barrier to insert 3082affee4b2STejun Heo * @target: target work to attach @barr to 3083affee4b2STejun Heo * @worker: worker currently executing @target, NULL if @target is not executing 30844690c4abSTejun Heo * 3085affee4b2STejun Heo * @barr is linked to @target such that @barr is completed only after 3086affee4b2STejun Heo * @target finishes execution. Please note that the ordering 3087affee4b2STejun Heo * guarantee is observed only with respect to @target and on the local 3088affee4b2STejun Heo * cpu. 3089affee4b2STejun Heo * 3090affee4b2STejun Heo * Currently, a queued barrier can't be canceled. This is because 3091affee4b2STejun Heo * try_to_grab_pending() can't determine whether the work to be 3092affee4b2STejun Heo * grabbed is at the head of the queue and thus can't clear LINKED 3093affee4b2STejun Heo * flag of the previous work while there must be a valid next work 3094affee4b2STejun Heo * after a work with LINKED flag set. 3095affee4b2STejun Heo * 3096affee4b2STejun Heo * Note that when @worker is non-NULL, @target may be modified 3097112202d9STejun Heo * underneath us, so we can't reliably determine pwq from @target. 30984690c4abSTejun Heo * 30994690c4abSTejun Heo * CONTEXT: 3100a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock). 
31014690c4abSTejun Heo */ 3102112202d9STejun Heo static void insert_wq_barrier(struct pool_workqueue *pwq, 3103affee4b2STejun Heo struct wq_barrier *barr, 3104affee4b2STejun Heo struct work_struct *target, struct worker *worker) 3105fc2e4d70SOleg Nesterov { 3106d812796eSLai Jiangshan unsigned int work_flags = 0; 3107d812796eSLai Jiangshan unsigned int work_color; 3108affee4b2STejun Heo struct list_head *head; 3109affee4b2STejun Heo 3110dc186ad7SThomas Gleixner /* 3111d565ed63STejun Heo * debugobject calls are safe here even with pool->lock locked 3112dc186ad7SThomas Gleixner * as we know for sure that this will not trigger any of the 3113dc186ad7SThomas Gleixner * checks and call back into the fixup functions where we 3114dc186ad7SThomas Gleixner * might deadlock. 3115dc186ad7SThomas Gleixner */ 3116ca1cab37SAndrew Morton INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 311722df02bbSTejun Heo __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 311852fa5bc5SBoqun Feng 3119fd1a5b04SByungchul Park init_completion_map(&barr->done, &target->lockdep_map); 3120fd1a5b04SByungchul Park 31212607d7a6STejun Heo barr->task = current; 312283c22520SOleg Nesterov 31236741dd3fSGreg Kroah-Hartman /* The barrier work item does not participate in pwq->nr_active. */ 3124018f3a13SLai Jiangshan work_flags |= WORK_STRUCT_INACTIVE; 3125018f3a13SLai Jiangshan 3126affee4b2STejun Heo /* 3127affee4b2STejun Heo * If @target is currently being executed, schedule the 3128affee4b2STejun Heo * barrier to the worker; otherwise, put it after @target. 3129affee4b2STejun Heo */ 3130d812796eSLai Jiangshan if (worker) { 3131affee4b2STejun Heo head = worker->scheduled.next; 3132d812796eSLai Jiangshan work_color = worker->current_color; 3133d812796eSLai Jiangshan } else { 3134affee4b2STejun Heo unsigned long *bits = work_data_bits(target); 3135affee4b2STejun Heo 3136affee4b2STejun Heo head = target->entry.next; 3137affee4b2STejun Heo /* there can already be other linked works, inherit and set */ 3138d21cece0SLai Jiangshan work_flags |= *bits & WORK_STRUCT_LINKED; 3139d812796eSLai Jiangshan work_color = get_work_color(*bits); 3140affee4b2STejun Heo __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3141affee4b2STejun Heo } 3142affee4b2STejun Heo 3143d812796eSLai Jiangshan pwq->nr_in_flight[work_color]++; 3144d812796eSLai Jiangshan work_flags |= work_color_to_flags(work_color); 3145d812796eSLai Jiangshan 3146d21cece0SLai Jiangshan insert_work(pwq, &barr->work, head, work_flags); 3147fc2e4d70SOleg Nesterov } 3148fc2e4d70SOleg Nesterov 314973f53c4aSTejun Heo /** 3150112202d9STejun Heo * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 315173f53c4aSTejun Heo * @wq: workqueue being flushed 315273f53c4aSTejun Heo * @flush_color: new flush color, < 0 for no-op 315373f53c4aSTejun Heo * @work_color: new work color, < 0 for no-op 315473f53c4aSTejun Heo * 3155112202d9STejun Heo * Prepare pwqs for workqueue flushing. 315673f53c4aSTejun Heo * 3157112202d9STejun Heo * If @flush_color is non-negative, flush_color on all pwqs should be 3158112202d9STejun Heo * -1. If no pwq has in-flight commands at the specified color, all 3159112202d9STejun Heo * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3160112202d9STejun Heo * has in flight commands, its pwq->flush_color is set to 3161112202d9STejun Heo * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 316273f53c4aSTejun Heo * wakeup logic is armed and %true is returned. 
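 *
 * Worked example: if wq->work_color is currently 2, __flush_workqueue()
 * calls this with @flush_color = 2 and @work_color = 3. Each pwq which
 * still has nr_in_flight[2] items gets pwq->flush_color set to 2 and
 * increments wq->nr_pwqs_to_flush; once the last such item retires, the
 * first flusher's completion fires and color 2 is done.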
316373f53c4aSTejun Heo * 316473f53c4aSTejun Heo * The caller should have initialized @wq->first_flusher prior to 316573f53c4aSTejun Heo * calling this function with non-negative @flush_color. If 316673f53c4aSTejun Heo * @flush_color is negative, no flush color update is done and %false 316773f53c4aSTejun Heo * is returned. 316873f53c4aSTejun Heo * 3169112202d9STejun Heo * If @work_color is non-negative, all pwqs should have the same 317073f53c4aSTejun Heo * work_color which is previous to @work_color and all will be 317173f53c4aSTejun Heo * advanced to @work_color. 317273f53c4aSTejun Heo * 317373f53c4aSTejun Heo * CONTEXT: 31743c25a55dSLai Jiangshan * mutex_lock(wq->mutex). 317573f53c4aSTejun Heo * 3176d185af30SYacine Belkadi * Return: 317773f53c4aSTejun Heo * %true if @flush_color >= 0 and there's something to flush. %false 317873f53c4aSTejun Heo * otherwise. 317973f53c4aSTejun Heo */ 3180112202d9STejun Heo static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 318173f53c4aSTejun Heo int flush_color, int work_color) 31821da177e4SLinus Torvalds { 318373f53c4aSTejun Heo bool wait = false; 318449e3cf44STejun Heo struct pool_workqueue *pwq; 31851da177e4SLinus Torvalds 318673f53c4aSTejun Heo if (flush_color >= 0) { 31876183c009STejun Heo WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3188112202d9STejun Heo atomic_set(&wq->nr_pwqs_to_flush, 1); 3189dc186ad7SThomas Gleixner } 319014441960SOleg Nesterov 319149e3cf44STejun Heo for_each_pwq(pwq, wq) { 3192112202d9STejun Heo struct worker_pool *pool = pwq->pool; 31931da177e4SLinus Torvalds 3194a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 319573f53c4aSTejun Heo 319673f53c4aSTejun Heo if (flush_color >= 0) { 31976183c009STejun Heo WARN_ON_ONCE(pwq->flush_color != -1); 319873f53c4aSTejun Heo 3199112202d9STejun Heo if (pwq->nr_in_flight[flush_color]) { 3200112202d9STejun Heo pwq->flush_color = flush_color; 3201112202d9STejun Heo atomic_inc(&wq->nr_pwqs_to_flush); 320273f53c4aSTejun Heo wait = true; 32031da177e4SLinus Torvalds } 320473f53c4aSTejun Heo } 320573f53c4aSTejun Heo 320673f53c4aSTejun Heo if (work_color >= 0) { 32076183c009STejun Heo WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3208112202d9STejun Heo pwq->work_color = work_color; 320973f53c4aSTejun Heo } 321073f53c4aSTejun Heo 3211a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 32121da177e4SLinus Torvalds } 32131da177e4SLinus Torvalds 3214112202d9STejun Heo if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 321573f53c4aSTejun Heo complete(&wq->first_flusher->done); 321673f53c4aSTejun Heo 321773f53c4aSTejun Heo return wait; 321883c22520SOleg Nesterov } 32191da177e4SLinus Torvalds 32200fcb78c2SRolf Eike Beer /** 3221c4f135d6STetsuo Handa * __flush_workqueue - ensure that any scheduled work has run to completion. 32220fcb78c2SRolf Eike Beer * @wq: workqueue to flush 32231da177e4SLinus Torvalds * 3224c5aa87bbSTejun Heo * This function sleeps until all work items which were queued on entry 3225c5aa87bbSTejun Heo * have finished execution, but it is not livelocked by new incoming ones. 
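 *
 * Illustrative usage through the flush_workqueue() wrapper (a sketch;
 * my_wq and my_work are hypothetical):
 *
 *	queue_work(my_wq, &my_work);
 *	flush_workqueue(my_wq);
 *
 * Once flush_workqueue() returns, the earlier my_work is guaranteed to
 * have finished executing, though work items queued after the flush
 * began may still be running.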
32261da177e4SLinus Torvalds */ 3227c4f135d6STetsuo Handa void __flush_workqueue(struct workqueue_struct *wq) 32281da177e4SLinus Torvalds { 322973f53c4aSTejun Heo struct wq_flusher this_flusher = { 323073f53c4aSTejun Heo .list = LIST_HEAD_INIT(this_flusher.list), 323173f53c4aSTejun Heo .flush_color = -1, 3232fd1a5b04SByungchul Park .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), 323373f53c4aSTejun Heo }; 323473f53c4aSTejun Heo int next_color; 3235b1f4ec17SOleg Nesterov 32363347fa09STejun Heo if (WARN_ON(!wq_online)) 32373347fa09STejun Heo return; 32383347fa09STejun Heo 323987915adcSJohannes Berg lock_map_acquire(&wq->lockdep_map); 324087915adcSJohannes Berg lock_map_release(&wq->lockdep_map); 324187915adcSJohannes Berg 32423c25a55dSLai Jiangshan mutex_lock(&wq->mutex); 324373f53c4aSTejun Heo 324473f53c4aSTejun Heo /* 324573f53c4aSTejun Heo * Start-to-wait phase 324673f53c4aSTejun Heo */ 324773f53c4aSTejun Heo next_color = work_next_color(wq->work_color); 324873f53c4aSTejun Heo 324973f53c4aSTejun Heo if (next_color != wq->flush_color) { 325073f53c4aSTejun Heo /* 325173f53c4aSTejun Heo * Color space is not full. The current work_color 325273f53c4aSTejun Heo * becomes our flush_color and work_color is advanced 325373f53c4aSTejun Heo * by one. 325473f53c4aSTejun Heo */ 32556183c009STejun Heo WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 325673f53c4aSTejun Heo this_flusher.flush_color = wq->work_color; 325773f53c4aSTejun Heo wq->work_color = next_color; 325873f53c4aSTejun Heo 325973f53c4aSTejun Heo if (!wq->first_flusher) { 326073f53c4aSTejun Heo /* no flush in progress, become the first flusher */ 32616183c009STejun Heo WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 326273f53c4aSTejun Heo 326373f53c4aSTejun Heo wq->first_flusher = &this_flusher; 326473f53c4aSTejun Heo 3265112202d9STejun Heo if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 326673f53c4aSTejun Heo wq->work_color)) { 326773f53c4aSTejun Heo /* nothing to flush, done */ 326873f53c4aSTejun Heo wq->flush_color = next_color; 326973f53c4aSTejun Heo wq->first_flusher = NULL; 327073f53c4aSTejun Heo goto out_unlock; 327173f53c4aSTejun Heo } 327273f53c4aSTejun Heo } else { 327373f53c4aSTejun Heo /* wait in queue */ 32746183c009STejun Heo WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 327573f53c4aSTejun Heo list_add_tail(&this_flusher.list, &wq->flusher_queue); 3276112202d9STejun Heo flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 327773f53c4aSTejun Heo } 327873f53c4aSTejun Heo } else { 327973f53c4aSTejun Heo /* 328073f53c4aSTejun Heo * Oops, color space is full, wait on overflow queue. 328173f53c4aSTejun Heo * The next flush completion will assign us 328273f53c4aSTejun Heo * flush_color and transfer to flusher_queue. 328373f53c4aSTejun Heo */ 328473f53c4aSTejun Heo list_add_tail(&this_flusher.list, &wq->flusher_overflow); 328573f53c4aSTejun Heo } 328673f53c4aSTejun Heo 3287fca839c0STejun Heo check_flush_dependency(wq, NULL); 3288fca839c0STejun Heo 32893c25a55dSLai Jiangshan mutex_unlock(&wq->mutex); 329073f53c4aSTejun Heo 329173f53c4aSTejun Heo wait_for_completion(&this_flusher.done); 329273f53c4aSTejun Heo 329373f53c4aSTejun Heo /* 329473f53c4aSTejun Heo * Wake-up-and-cascade phase 329573f53c4aSTejun Heo * 329673f53c4aSTejun Heo * First flushers are responsible for cascading flushes and 329773f53c4aSTejun Heo * handling overflow. Non-first flushers can simply return. 
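 *
 * Worked example (illustrative): suppose wq->work_color is 2 when the
 * first flusher arrives. It flushes color 2 and work_color advances to 3.
 * A second concurrent flusher is assigned flush color 3, advances
 * work_color to 4 and waits on flusher_queue until the cascade below
 * reaches color 3. Only when the whole color space is in use do new
 * flushers park on flusher_overflow.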
329873f53c4aSTejun Heo */ 329900d5d15bSChris Wilson if (READ_ONCE(wq->first_flusher) != &this_flusher) 330073f53c4aSTejun Heo return; 330173f53c4aSTejun Heo 33023c25a55dSLai Jiangshan mutex_lock(&wq->mutex); 330373f53c4aSTejun Heo 33044ce48b37STejun Heo /* we might have raced, check again with mutex held */ 33054ce48b37STejun Heo if (wq->first_flusher != &this_flusher) 33064ce48b37STejun Heo goto out_unlock; 33074ce48b37STejun Heo 330800d5d15bSChris Wilson WRITE_ONCE(wq->first_flusher, NULL); 330973f53c4aSTejun Heo 33106183c009STejun Heo WARN_ON_ONCE(!list_empty(&this_flusher.list)); 33116183c009STejun Heo WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 331273f53c4aSTejun Heo 331373f53c4aSTejun Heo while (true) { 331473f53c4aSTejun Heo struct wq_flusher *next, *tmp; 331573f53c4aSTejun Heo 331673f53c4aSTejun Heo /* complete all the flushers sharing the current flush color */ 331773f53c4aSTejun Heo list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 331873f53c4aSTejun Heo if (next->flush_color != wq->flush_color) 331973f53c4aSTejun Heo break; 332073f53c4aSTejun Heo list_del_init(&next->list); 332173f53c4aSTejun Heo complete(&next->done); 332273f53c4aSTejun Heo } 332373f53c4aSTejun Heo 33246183c009STejun Heo WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 332573f53c4aSTejun Heo wq->flush_color != work_next_color(wq->work_color)); 332673f53c4aSTejun Heo 332773f53c4aSTejun Heo /* this flush_color is finished, advance by one */ 332873f53c4aSTejun Heo wq->flush_color = work_next_color(wq->flush_color); 332973f53c4aSTejun Heo 333073f53c4aSTejun Heo /* one color has been freed, handle overflow queue */ 333173f53c4aSTejun Heo if (!list_empty(&wq->flusher_overflow)) { 333273f53c4aSTejun Heo /* 333373f53c4aSTejun Heo * Assign the same color to all overflowed 333473f53c4aSTejun Heo * flushers, advance work_color and append to 333573f53c4aSTejun Heo * flusher_queue. This is the start-to-wait 333673f53c4aSTejun Heo * phase for these overflowed flushers. 333773f53c4aSTejun Heo */ 333873f53c4aSTejun Heo list_for_each_entry(tmp, &wq->flusher_overflow, list) 333973f53c4aSTejun Heo tmp->flush_color = wq->work_color; 334073f53c4aSTejun Heo 334173f53c4aSTejun Heo wq->work_color = work_next_color(wq->work_color); 334273f53c4aSTejun Heo 334373f53c4aSTejun Heo list_splice_tail_init(&wq->flusher_overflow, 334473f53c4aSTejun Heo &wq->flusher_queue); 3345112202d9STejun Heo flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 334673f53c4aSTejun Heo } 334773f53c4aSTejun Heo 334873f53c4aSTejun Heo if (list_empty(&wq->flusher_queue)) { 33496183c009STejun Heo WARN_ON_ONCE(wq->flush_color != wq->work_color); 335073f53c4aSTejun Heo break; 335173f53c4aSTejun Heo } 335273f53c4aSTejun Heo 335373f53c4aSTejun Heo /* 335473f53c4aSTejun Heo * Need to flush more colors. Make the next flusher 3355112202d9STejun Heo * the new first flusher and arm pwqs. 335673f53c4aSTejun Heo */ 33576183c009STejun Heo WARN_ON_ONCE(wq->flush_color == wq->work_color); 33586183c009STejun Heo WARN_ON_ONCE(wq->flush_color != next->flush_color); 335973f53c4aSTejun Heo 336073f53c4aSTejun Heo list_del_init(&next->list); 336173f53c4aSTejun Heo wq->first_flusher = next; 336273f53c4aSTejun Heo 3363112202d9STejun Heo if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 336473f53c4aSTejun Heo break; 336573f53c4aSTejun Heo 336673f53c4aSTejun Heo /* 336773f53c4aSTejun Heo * Meh... this color is already done, clear first 336873f53c4aSTejun Heo * flusher and repeat cascading. 
336973f53c4aSTejun Heo */ 337073f53c4aSTejun Heo wq->first_flusher = NULL; 337173f53c4aSTejun Heo } 337273f53c4aSTejun Heo 337373f53c4aSTejun Heo out_unlock: 33743c25a55dSLai Jiangshan mutex_unlock(&wq->mutex); 33751da177e4SLinus Torvalds } 3376c4f135d6STetsuo Handa EXPORT_SYMBOL(__flush_workqueue); 33771da177e4SLinus Torvalds 33789c5a2ba7STejun Heo /** 33799c5a2ba7STejun Heo * drain_workqueue - drain a workqueue 33809c5a2ba7STejun Heo * @wq: workqueue to drain 33819c5a2ba7STejun Heo * 33829c5a2ba7STejun Heo * Wait until the workqueue becomes empty. While draining is in progress, 33839c5a2ba7STejun Heo * only chain queueing is allowed. IOW, only currently pending or running 33849c5a2ba7STejun Heo * work items on @wq can queue further work items on it. @wq is flushed 3385b749b1b6SChen Hanxiao * repeatedly until it becomes empty. The number of flushing is determined 33869c5a2ba7STejun Heo * by the depth of chaining and should be relatively short. Whine if it 33879c5a2ba7STejun Heo * takes too long. 33889c5a2ba7STejun Heo */ 33899c5a2ba7STejun Heo void drain_workqueue(struct workqueue_struct *wq) 33909c5a2ba7STejun Heo { 33919c5a2ba7STejun Heo unsigned int flush_cnt = 0; 339249e3cf44STejun Heo struct pool_workqueue *pwq; 33939c5a2ba7STejun Heo 33949c5a2ba7STejun Heo /* 33959c5a2ba7STejun Heo * __queue_work() needs to test whether there are drainers, is much 33969c5a2ba7STejun Heo * hotter than drain_workqueue() and already looks at @wq->flags. 3397618b01ebSTejun Heo * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 33989c5a2ba7STejun Heo */ 339987fc741eSLai Jiangshan mutex_lock(&wq->mutex); 34009c5a2ba7STejun Heo if (!wq->nr_drainers++) 3401618b01ebSTejun Heo wq->flags |= __WQ_DRAINING; 340287fc741eSLai Jiangshan mutex_unlock(&wq->mutex); 34039c5a2ba7STejun Heo reflush: 3404c4f135d6STetsuo Handa __flush_workqueue(wq); 34059c5a2ba7STejun Heo 3406b09f4fd3SLai Jiangshan mutex_lock(&wq->mutex); 340776af4d93STejun Heo 340849e3cf44STejun Heo for_each_pwq(pwq, wq) { 3409fa2563e4SThomas Tuttle bool drained; 34109c5a2ba7STejun Heo 3411a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pwq->pool->lock); 3412bad184d2STejun Heo drained = pwq_is_empty(pwq); 3413a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pwq->pool->lock); 3414fa2563e4SThomas Tuttle 3415fa2563e4SThomas Tuttle if (drained) 34169c5a2ba7STejun Heo continue; 34179c5a2ba7STejun Heo 34189c5a2ba7STejun Heo if (++flush_cnt == 10 || 34199c5a2ba7STejun Heo (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 3420e9ad2eb3SStephen Zhang pr_warn("workqueue %s: %s() isn't complete after %u tries\n", 3421e9ad2eb3SStephen Zhang wq->name, __func__, flush_cnt); 342276af4d93STejun Heo 3423b09f4fd3SLai Jiangshan mutex_unlock(&wq->mutex); 34249c5a2ba7STejun Heo goto reflush; 34259c5a2ba7STejun Heo } 34269c5a2ba7STejun Heo 34279c5a2ba7STejun Heo if (!--wq->nr_drainers) 3428618b01ebSTejun Heo wq->flags &= ~__WQ_DRAINING; 342987fc741eSLai Jiangshan mutex_unlock(&wq->mutex); 34309c5a2ba7STejun Heo } 34319c5a2ba7STejun Heo EXPORT_SYMBOL_GPL(drain_workqueue); 34329c5a2ba7STejun Heo 3433d6e89786SJohannes Berg static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 3434d6e89786SJohannes Berg bool from_cancel) 3435baf59022STejun Heo { 3436baf59022STejun Heo struct worker *worker = NULL; 3437c9e7cf27STejun Heo struct worker_pool *pool; 3438112202d9STejun Heo struct pool_workqueue *pwq; 3439baf59022STejun Heo 3440baf59022STejun Heo might_sleep(); 3441baf59022STejun Heo 344224acfb71SThomas Gleixner rcu_read_lock(); 
3443fa1b54e6STejun Heo pool = get_work_pool(work); 3444fa1b54e6STejun Heo if (!pool) { 344524acfb71SThomas Gleixner rcu_read_unlock(); 3446fa1b54e6STejun Heo return false; 3447fa1b54e6STejun Heo } 3448fa1b54e6STejun Heo 3449a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 34500b3dae68SLai Jiangshan /* see the comment in try_to_grab_pending() with the same code */ 3451112202d9STejun Heo pwq = get_work_pwq(work); 3452112202d9STejun Heo if (pwq) { 3453112202d9STejun Heo if (unlikely(pwq->pool != pool)) 3454baf59022STejun Heo goto already_gone; 3455606a5020STejun Heo } else { 3456c9e7cf27STejun Heo worker = find_worker_executing_work(pool, work); 3457baf59022STejun Heo if (!worker) 3458baf59022STejun Heo goto already_gone; 3459112202d9STejun Heo pwq = worker->current_pwq; 3460606a5020STejun Heo } 3461baf59022STejun Heo 3462fca839c0STejun Heo check_flush_dependency(pwq->wq, work); 3463fca839c0STejun Heo 3464112202d9STejun Heo insert_wq_barrier(pwq, barr, work, worker); 3465a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 3466baf59022STejun Heo 3467e159489bSTejun Heo /* 3468a1d14934SPeter Zijlstra * Force a lock recursion deadlock when using flush_work() inside a 3469a1d14934SPeter Zijlstra * single-threaded or rescuer equipped workqueue. 3470a1d14934SPeter Zijlstra * 3471a1d14934SPeter Zijlstra * For single threaded workqueues the deadlock happens when the work 3472a1d14934SPeter Zijlstra * is after the work issuing the flush_work(). For rescuer equipped 3473a1d14934SPeter Zijlstra * workqueues the deadlock happens when the rescuer stalls, blocking 3474a1d14934SPeter Zijlstra * forward progress. 3475e159489bSTejun Heo */ 3476d6e89786SJohannes Berg if (!from_cancel && 3477d6e89786SJohannes Berg (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { 3478112202d9STejun Heo lock_map_acquire(&pwq->wq->lockdep_map); 3479112202d9STejun Heo lock_map_release(&pwq->wq->lockdep_map); 3480a1d14934SPeter Zijlstra } 348124acfb71SThomas Gleixner rcu_read_unlock(); 3482baf59022STejun Heo return true; 3483baf59022STejun Heo already_gone: 3484a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 348524acfb71SThomas Gleixner rcu_read_unlock(); 3486baf59022STejun Heo return false; 3487baf59022STejun Heo } 3488baf59022STejun Heo 3489d6e89786SJohannes Berg static bool __flush_work(struct work_struct *work, bool from_cancel) 3490d6e89786SJohannes Berg { 3491d6e89786SJohannes Berg struct wq_barrier barr; 3492d6e89786SJohannes Berg 3493d6e89786SJohannes Berg if (WARN_ON(!wq_online)) 3494d6e89786SJohannes Berg return false; 3495d6e89786SJohannes Berg 34964d43d395STetsuo Handa if (WARN_ON(!work->func)) 34974d43d395STetsuo Handa return false; 34984d43d395STetsuo Handa 349987915adcSJohannes Berg lock_map_acquire(&work->lockdep_map); 350087915adcSJohannes Berg lock_map_release(&work->lockdep_map); 350187915adcSJohannes Berg 3502d6e89786SJohannes Berg if (start_flush_work(work, &barr, from_cancel)) { 3503d6e89786SJohannes Berg wait_for_completion(&barr.done); 3504d6e89786SJohannes Berg destroy_work_on_stack(&barr.work); 3505d6e89786SJohannes Berg return true; 3506d6e89786SJohannes Berg } else { 3507d6e89786SJohannes Berg return false; 3508d6e89786SJohannes Berg } 3509d6e89786SJohannes Berg } 3510d6e89786SJohannes Berg 3511db700897SOleg Nesterov /** 3512401a8d04STejun Heo * flush_work - wait for a work to finish executing the last queueing instance 3513401a8d04STejun Heo * @work: the work to flush 3514db700897SOleg Nesterov * 3515606a5020STejun Heo * Wait until @work has 
finished execution. @work is guaranteed to be idle 3516606a5020STejun Heo * on return if it hasn't been requeued since flush started. 3517401a8d04STejun Heo * 3518d185af30SYacine Belkadi * Return: 3519401a8d04STejun Heo * %true if flush_work() waited for the work to finish execution, 3520401a8d04STejun Heo * %false if it was already idle. 3521db700897SOleg Nesterov */ 3522401a8d04STejun Heo bool flush_work(struct work_struct *work) 3523db700897SOleg Nesterov { 3524d6e89786SJohannes Berg return __flush_work(work, false); 3525606a5020STejun Heo } 3526db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work); 3527db700897SOleg Nesterov 35288603e1b3STejun Heo struct cwt_wait { 3529ac6424b9SIngo Molnar wait_queue_entry_t wait; 35308603e1b3STejun Heo struct work_struct *work; 35318603e1b3STejun Heo }; 35328603e1b3STejun Heo 3533ac6424b9SIngo Molnar static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 35348603e1b3STejun Heo { 35358603e1b3STejun Heo struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 35368603e1b3STejun Heo 35378603e1b3STejun Heo if (cwait->work != key) 35388603e1b3STejun Heo return 0; 35398603e1b3STejun Heo return autoremove_wake_function(wait, mode, sync, key); 35408603e1b3STejun Heo } 35418603e1b3STejun Heo 354236e227d2STejun Heo static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 3543401a8d04STejun Heo { 35448603e1b3STejun Heo static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); 3545bbb68dfaSTejun Heo unsigned long flags; 35461f1f642eSOleg Nesterov int ret; 35471f1f642eSOleg Nesterov 35481f1f642eSOleg Nesterov do { 3549bbb68dfaSTejun Heo ret = try_to_grab_pending(work, is_dwork, &flags); 3550bbb68dfaSTejun Heo /* 35518603e1b3STejun Heo * If someone else is already canceling, wait for it to 35528603e1b3STejun Heo * finish. flush_work() doesn't work for PREEMPT_NONE 35538603e1b3STejun Heo * because we may get scheduled between @work's completion 35548603e1b3STejun Heo * and the other canceling task resuming and clearing 35558603e1b3STejun Heo * CANCELING - flush_work() will return false immediately 35568603e1b3STejun Heo * as @work is no longer busy, try_to_grab_pending() will 35578603e1b3STejun Heo * return -ENOENT as @work is still being canceled and the 35588603e1b3STejun Heo * other canceling task won't be able to clear CANCELING as 35598603e1b3STejun Heo * we're hogging the CPU. 35608603e1b3STejun Heo * 35618603e1b3STejun Heo * Let's wait for completion using a waitqueue. As this 35628603e1b3STejun Heo * may lead to the thundering herd problem, use a custom 35638603e1b3STejun Heo * wake function which matches @work along with exclusive 35648603e1b3STejun Heo * wait and wakeup. 
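 *
 * As a reminder of the loop contract (see try_to_grab_pending()'s
 * description): 1 means PENDING was stolen from a queued @work, 0 means
 * @work was already idle, -EAGAIN asks for a plain retry, and -ENOENT
 * means some other task is canceling @work, which is the case handled
 * with the waitqueue below.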
3565bbb68dfaSTejun Heo */ 35668603e1b3STejun Heo if (unlikely(ret == -ENOENT)) { 35678603e1b3STejun Heo struct cwt_wait cwait; 35688603e1b3STejun Heo 35698603e1b3STejun Heo init_wait(&cwait.wait); 35708603e1b3STejun Heo cwait.wait.func = cwt_wakefn; 35718603e1b3STejun Heo cwait.work = work; 35728603e1b3STejun Heo 35738603e1b3STejun Heo prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 35748603e1b3STejun Heo TASK_UNINTERRUPTIBLE); 35758603e1b3STejun Heo if (work_is_canceling(work)) 35768603e1b3STejun Heo schedule(); 35778603e1b3STejun Heo finish_wait(&cancel_waitq, &cwait.wait); 35788603e1b3STejun Heo } 35791f1f642eSOleg Nesterov } while (unlikely(ret < 0)); 35801f1f642eSOleg Nesterov 3581bbb68dfaSTejun Heo /* tell other tasks trying to grab @work to back off */ 3582bbb68dfaSTejun Heo mark_work_canceling(work); 3583bbb68dfaSTejun Heo local_irq_restore(flags); 3584bbb68dfaSTejun Heo 35853347fa09STejun Heo /* 35863347fa09STejun Heo * This allows canceling during early boot. We know that @work 35873347fa09STejun Heo * isn't executing. 35883347fa09STejun Heo */ 35893347fa09STejun Heo if (wq_online) 3590d6e89786SJohannes Berg __flush_work(work, true); 35913347fa09STejun Heo 35927a22ad75STejun Heo clear_work_data(work); 35938603e1b3STejun Heo 35948603e1b3STejun Heo /* 35958603e1b3STejun Heo * Paired with prepare_to_wait() above so that either 35968603e1b3STejun Heo * waitqueue_active() is visible here or !work_is_canceling() is 35978603e1b3STejun Heo * visible there. 35988603e1b3STejun Heo */ 35998603e1b3STejun Heo smp_mb(); 36008603e1b3STejun Heo if (waitqueue_active(&cancel_waitq)) 36018603e1b3STejun Heo __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 36028603e1b3STejun Heo 36031f1f642eSOleg Nesterov return ret; 36041f1f642eSOleg Nesterov } 36051f1f642eSOleg Nesterov 36066e84d644SOleg Nesterov /** 3607401a8d04STejun Heo * cancel_work_sync - cancel a work and wait for it to finish 3608401a8d04STejun Heo * @work: the work to cancel 36096e84d644SOleg Nesterov * 3610401a8d04STejun Heo * Cancel @work and wait for its execution to finish. This function 3611401a8d04STejun Heo * can be used even if the work re-queues itself or migrates to 3612401a8d04STejun Heo * another workqueue. On return from this function, @work is 3613401a8d04STejun Heo * guaranteed to be not pending or executing on any CPU. 36141f1f642eSOleg Nesterov * 3615401a8d04STejun Heo * cancel_work_sync(&delayed_work->work) must not be used for 3616401a8d04STejun Heo * delayed_work's. Use cancel_delayed_work_sync() instead. 36176e84d644SOleg Nesterov * 3618401a8d04STejun Heo * The caller must ensure that the workqueue on which @work was last 36196e84d644SOleg Nesterov * queued can't be destroyed before this function returns. 3620401a8d04STejun Heo * 3621d185af30SYacine Belkadi * Return: 3622401a8d04STejun Heo * %true if @work was pending, %false otherwise. 36236e84d644SOleg Nesterov */ 3624401a8d04STejun Heo bool cancel_work_sync(struct work_struct *work) 36256e84d644SOleg Nesterov { 362636e227d2STejun Heo return __cancel_work_timer(work, false); 3627b89deed3SOleg Nesterov } 362828e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync); 3629b89deed3SOleg Nesterov 36306e84d644SOleg Nesterov /** 3631401a8d04STejun Heo * flush_delayed_work - wait for a dwork to finish executing the last queueing 3632401a8d04STejun Heo * @dwork: the delayed work to flush 36336e84d644SOleg Nesterov * 3634401a8d04STejun Heo * Delayed timer is cancelled and the pending work is queued for 3635401a8d04STejun Heo * immediate execution. 
Like flush_work(), this function only 3636401a8d04STejun Heo * considers the last queueing instance of @dwork. 36371f1f642eSOleg Nesterov * 3638d185af30SYacine Belkadi * Return: 3639401a8d04STejun Heo * %true if flush_work() waited for the work to finish execution, 3640401a8d04STejun Heo * %false if it was already idle. 36416e84d644SOleg Nesterov */ 3642401a8d04STejun Heo bool flush_delayed_work(struct delayed_work *dwork) 3643401a8d04STejun Heo { 36448930cabaSTejun Heo local_irq_disable(); 3645401a8d04STejun Heo if (del_timer_sync(&dwork->timer)) 364660c057bcSLai Jiangshan __queue_work(dwork->cpu, dwork->wq, &dwork->work); 36478930cabaSTejun Heo local_irq_enable(); 3648401a8d04STejun Heo return flush_work(&dwork->work); 3649401a8d04STejun Heo } 3650401a8d04STejun Heo EXPORT_SYMBOL(flush_delayed_work); 3651401a8d04STejun Heo 365205f0fe6bSTejun Heo /** 365305f0fe6bSTejun Heo * flush_rcu_work - wait for a rwork to finish executing the last queueing 365405f0fe6bSTejun Heo * @rwork: the rcu work to flush 365505f0fe6bSTejun Heo * 365605f0fe6bSTejun Heo * Return: 365705f0fe6bSTejun Heo * %true if flush_rcu_work() waited for the work to finish execution, 365805f0fe6bSTejun Heo * %false if it was already idle. 365905f0fe6bSTejun Heo */ 366005f0fe6bSTejun Heo bool flush_rcu_work(struct rcu_work *rwork) 366105f0fe6bSTejun Heo { 366205f0fe6bSTejun Heo if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { 366305f0fe6bSTejun Heo rcu_barrier(); 366405f0fe6bSTejun Heo flush_work(&rwork->work); 366505f0fe6bSTejun Heo return true; 366605f0fe6bSTejun Heo } else { 366705f0fe6bSTejun Heo return flush_work(&rwork->work); 366805f0fe6bSTejun Heo } 366905f0fe6bSTejun Heo } 367005f0fe6bSTejun Heo EXPORT_SYMBOL(flush_rcu_work); 367105f0fe6bSTejun Heo 3672f72b8792SJens Axboe static bool __cancel_work(struct work_struct *work, bool is_dwork) 3673f72b8792SJens Axboe { 3674f72b8792SJens Axboe unsigned long flags; 3675f72b8792SJens Axboe int ret; 3676f72b8792SJens Axboe 3677f72b8792SJens Axboe do { 3678f72b8792SJens Axboe ret = try_to_grab_pending(work, is_dwork, &flags); 3679f72b8792SJens Axboe } while (unlikely(ret == -EAGAIN)); 3680f72b8792SJens Axboe 3681f72b8792SJens Axboe if (unlikely(ret < 0)) 3682f72b8792SJens Axboe return false; 3683f72b8792SJens Axboe 3684f72b8792SJens Axboe set_work_pool_and_clear_pending(work, get_work_pool_id(work)); 3685f72b8792SJens Axboe local_irq_restore(flags); 3686f72b8792SJens Axboe return ret; 3687f72b8792SJens Axboe } 3688f72b8792SJens Axboe 368973b4b532SAndrey Grodzovsky /* 369073b4b532SAndrey Grodzovsky * See cancel_delayed_work() 369173b4b532SAndrey Grodzovsky */ 369273b4b532SAndrey Grodzovsky bool cancel_work(struct work_struct *work) 369373b4b532SAndrey Grodzovsky { 369473b4b532SAndrey Grodzovsky return __cancel_work(work, false); 369573b4b532SAndrey Grodzovsky } 369673b4b532SAndrey Grodzovsky EXPORT_SYMBOL(cancel_work); 369773b4b532SAndrey Grodzovsky 3698401a8d04STejun Heo /** 369957b30ae7STejun Heo * cancel_delayed_work - cancel a delayed work 370057b30ae7STejun Heo * @dwork: delayed_work to cancel 370109383498STejun Heo * 3702d185af30SYacine Belkadi * Kill off a pending delayed_work. 3703d185af30SYacine Belkadi * 3704d185af30SYacine Belkadi * Return: %true if @dwork was pending and canceled; %false if it wasn't 3705d185af30SYacine Belkadi * pending. 
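 *
 * Minimal usage sketch (illustrative only; @my_dwork is a hypothetical
 * caller-owned delayed_work):
 *
 *        if (cancel_delayed_work(&my_dwork))
 *                pr_debug("pending timer cancelled\n");
 *
 * To also wait for a callback that may already be running, use
 * cancel_delayed_work_sync() as described in the Note below.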
3706d185af30SYacine Belkadi * 3707d185af30SYacine Belkadi * Note: 3708d185af30SYacine Belkadi * The work callback function may still be running on return, unless 3709d185af30SYacine Belkadi * it returns %true and the work doesn't re-arm itself. Explicitly flush or 3710d185af30SYacine Belkadi * use cancel_delayed_work_sync() to wait on it. 371109383498STejun Heo * 371257b30ae7STejun Heo * This function is safe to call from any context including IRQ handler. 371309383498STejun Heo */ 371457b30ae7STejun Heo bool cancel_delayed_work(struct delayed_work *dwork) 371509383498STejun Heo { 3716f72b8792SJens Axboe return __cancel_work(&dwork->work, true); 371709383498STejun Heo } 371857b30ae7STejun Heo EXPORT_SYMBOL(cancel_delayed_work); 371909383498STejun Heo 372009383498STejun Heo /** 3721401a8d04STejun Heo * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 3722401a8d04STejun Heo * @dwork: the delayed work cancel 3723401a8d04STejun Heo * 3724401a8d04STejun Heo * This is cancel_work_sync() for delayed works. 3725401a8d04STejun Heo * 3726d185af30SYacine Belkadi * Return: 3727401a8d04STejun Heo * %true if @dwork was pending, %false otherwise. 3728401a8d04STejun Heo */ 3729401a8d04STejun Heo bool cancel_delayed_work_sync(struct delayed_work *dwork) 37306e84d644SOleg Nesterov { 373136e227d2STejun Heo return __cancel_work_timer(&dwork->work, true); 37326e84d644SOleg Nesterov } 3733f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync); 37341da177e4SLinus Torvalds 37350fcb78c2SRolf Eike Beer /** 373631ddd871STejun Heo * schedule_on_each_cpu - execute a function synchronously on each online CPU 3737b6136773SAndrew Morton * @func: the function to call 3738b6136773SAndrew Morton * 373931ddd871STejun Heo * schedule_on_each_cpu() executes @func on each online CPU using the 374031ddd871STejun Heo * system workqueue and blocks until all CPUs have completed. 3741b6136773SAndrew Morton * schedule_on_each_cpu() is very slow. 374231ddd871STejun Heo * 3743d185af30SYacine Belkadi * Return: 374431ddd871STejun Heo * 0 on success, -errno on failure. 
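 *
 * Minimal usage sketch (illustrative only; bump_counters() and the per-CPU
 * variable my_counter are hypothetical):
 *
 *        static void bump_counters(struct work_struct *work)
 *        {
 *                this_cpu_inc(my_counter);
 *        }
 *
 *        ret = schedule_on_each_cpu(bump_counters);
 *
 * The call blocks until the callback has run on every online CPU, so it
 * must not be used from atomic context.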
3745b6136773SAndrew Morton */ 374665f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func) 374715316ba8SChristoph Lameter { 374815316ba8SChristoph Lameter int cpu; 374938f51568SNamhyung Kim struct work_struct __percpu *works; 375015316ba8SChristoph Lameter 3751b6136773SAndrew Morton works = alloc_percpu(struct work_struct); 3752b6136773SAndrew Morton if (!works) 375315316ba8SChristoph Lameter return -ENOMEM; 3754b6136773SAndrew Morton 3755ffd8bea8SSebastian Andrzej Siewior cpus_read_lock(); 375693981800STejun Heo 375715316ba8SChristoph Lameter for_each_online_cpu(cpu) { 37589bfb1839SIngo Molnar struct work_struct *work = per_cpu_ptr(works, cpu); 37599bfb1839SIngo Molnar 37609bfb1839SIngo Molnar INIT_WORK(work, func); 37618de6d308SOleg Nesterov schedule_work_on(cpu, work); 376215316ba8SChristoph Lameter } 376393981800STejun Heo 376493981800STejun Heo for_each_online_cpu(cpu) 37658616a89aSOleg Nesterov flush_work(per_cpu_ptr(works, cpu)); 376693981800STejun Heo 3767ffd8bea8SSebastian Andrzej Siewior cpus_read_unlock(); 3768b6136773SAndrew Morton free_percpu(works); 376915316ba8SChristoph Lameter return 0; 377015316ba8SChristoph Lameter } 377115316ba8SChristoph Lameter 3772eef6a7d5SAlan Stern /** 37731fa44ecaSJames Bottomley * execute_in_process_context - reliably execute the routine with user context 37741fa44ecaSJames Bottomley * @fn: the function to execute 37751fa44ecaSJames Bottomley * @ew: guaranteed storage for the execute work structure (must 37761fa44ecaSJames Bottomley * be available when the work executes) 37771fa44ecaSJames Bottomley * 37781fa44ecaSJames Bottomley * Executes the function immediately if process context is available, 37791fa44ecaSJames Bottomley * otherwise schedules the function for delayed execution. 37801fa44ecaSJames Bottomley * 3781d185af30SYacine Belkadi * Return: 0 - function was executed 37821fa44ecaSJames Bottomley * 1 - function was scheduled for execution 37831fa44ecaSJames Bottomley */ 378465f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew) 37851fa44ecaSJames Bottomley { 37861fa44ecaSJames Bottomley if (!in_interrupt()) { 378765f27f38SDavid Howells fn(&ew->work); 37881fa44ecaSJames Bottomley return 0; 37891fa44ecaSJames Bottomley } 37901fa44ecaSJames Bottomley 379165f27f38SDavid Howells INIT_WORK(&ew->work, fn); 37921fa44ecaSJames Bottomley schedule_work(&ew->work); 37931fa44ecaSJames Bottomley 37941fa44ecaSJames Bottomley return 1; 37951fa44ecaSJames Bottomley } 37961fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context); 37971fa44ecaSJames Bottomley 37987a4e344cSTejun Heo /** 37997a4e344cSTejun Heo * free_workqueue_attrs - free a workqueue_attrs 38007a4e344cSTejun Heo * @attrs: workqueue_attrs to free 38017a4e344cSTejun Heo * 38027a4e344cSTejun Heo * Undo alloc_workqueue_attrs(). 38037a4e344cSTejun Heo */ 3804513c98d0SDaniel Jordan void free_workqueue_attrs(struct workqueue_attrs *attrs) 38057a4e344cSTejun Heo { 38067a4e344cSTejun Heo if (attrs) { 38077a4e344cSTejun Heo free_cpumask_var(attrs->cpumask); 38089546b29eSTejun Heo free_cpumask_var(attrs->__pod_cpumask); 38097a4e344cSTejun Heo kfree(attrs); 38107a4e344cSTejun Heo } 38117a4e344cSTejun Heo } 38127a4e344cSTejun Heo 38137a4e344cSTejun Heo /** 38147a4e344cSTejun Heo * alloc_workqueue_attrs - allocate a workqueue_attrs 38157a4e344cSTejun Heo * 38167a4e344cSTejun Heo * Allocate a new workqueue_attrs, initialize with default settings and 3817d185af30SYacine Belkadi * return it. 
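 *
 * Typical pairing (illustrative sketch; @my_wq and @my_cpumask are
 * hypothetical, and the cpus_read_lock() reflects the CPU-hotplug read
 * exclusion that apply_workqueue_attrs() expects from its callers):
 *
 *        attrs = alloc_workqueue_attrs();
 *        if (!attrs)
 *                return -ENOMEM;
 *        attrs->nice = -5;
 *        cpumask_copy(attrs->cpumask, my_cpumask);
 *        cpus_read_lock();
 *        ret = apply_workqueue_attrs(my_wq, attrs);
 *        cpus_read_unlock();
 *        free_workqueue_attrs(attrs);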
3818d185af30SYacine Belkadi * 3819d185af30SYacine Belkadi * Return: The allocated new workqueue_attr on success. %NULL on failure. 38207a4e344cSTejun Heo */ 3821513c98d0SDaniel Jordan struct workqueue_attrs *alloc_workqueue_attrs(void) 38227a4e344cSTejun Heo { 38237a4e344cSTejun Heo struct workqueue_attrs *attrs; 38247a4e344cSTejun Heo 3825be69d00dSThomas Gleixner attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 38267a4e344cSTejun Heo if (!attrs) 38277a4e344cSTejun Heo goto fail; 3828be69d00dSThomas Gleixner if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) 38297a4e344cSTejun Heo goto fail; 38309546b29eSTejun Heo if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) 38319546b29eSTejun Heo goto fail; 38327a4e344cSTejun Heo 383313e2e556STejun Heo cpumask_copy(attrs->cpumask, cpu_possible_mask); 3834523a301eSTejun Heo attrs->affn_scope = WQ_AFFN_DFL; 38357a4e344cSTejun Heo return attrs; 38367a4e344cSTejun Heo fail: 38377a4e344cSTejun Heo free_workqueue_attrs(attrs); 38387a4e344cSTejun Heo return NULL; 38397a4e344cSTejun Heo } 38407a4e344cSTejun Heo 384129c91e99STejun Heo static void copy_workqueue_attrs(struct workqueue_attrs *to, 384229c91e99STejun Heo const struct workqueue_attrs *from) 384329c91e99STejun Heo { 384429c91e99STejun Heo to->nice = from->nice; 384529c91e99STejun Heo cpumask_copy(to->cpumask, from->cpumask); 38469546b29eSTejun Heo cpumask_copy(to->__pod_cpumask, from->__pod_cpumask); 38478639ecebSTejun Heo to->affn_strict = from->affn_strict; 384884193c07STejun Heo 38492865a8fbSShaohua Li /* 385084193c07STejun Heo * Unlike hash and equality test, copying shouldn't ignore wq-only 385184193c07STejun Heo * fields as copying is used for both pool and wq attrs. Instead, 385284193c07STejun Heo * get_unbound_pool() explicitly clears the fields. 38532865a8fbSShaohua Li */ 385484193c07STejun Heo to->affn_scope = from->affn_scope; 3855af73f5c9STejun Heo to->ordered = from->ordered; 385629c91e99STejun Heo } 385729c91e99STejun Heo 38585de7a03cSTejun Heo /* 38595de7a03cSTejun Heo * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the 38605de7a03cSTejun Heo * comments in 'struct workqueue_attrs' definition. 
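 *
 * One consequence worth spelling out: wqattrs_hash() and wqattrs_equal()
 * below only look at nice, cpumask, __pod_cpumask and affn_strict, so two
 * workqueues that differ only in the wq-only fields (e.g. ->ordered) can
 * end up sharing the same unbound worker_pool.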
38615de7a03cSTejun Heo */ 38625de7a03cSTejun Heo static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) 38635de7a03cSTejun Heo { 386484193c07STejun Heo attrs->affn_scope = WQ_AFFN_NR_TYPES; 38655de7a03cSTejun Heo attrs->ordered = false; 38665de7a03cSTejun Heo } 38675de7a03cSTejun Heo 386829c91e99STejun Heo /* hash value of the content of @attr */ 386929c91e99STejun Heo static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 387029c91e99STejun Heo { 387129c91e99STejun Heo u32 hash = 0; 387229c91e99STejun Heo 387329c91e99STejun Heo hash = jhash_1word(attrs->nice, hash); 387413e2e556STejun Heo hash = jhash(cpumask_bits(attrs->cpumask), 387513e2e556STejun Heo BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 38769546b29eSTejun Heo hash = jhash(cpumask_bits(attrs->__pod_cpumask), 38779546b29eSTejun Heo BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 38788639ecebSTejun Heo hash = jhash_1word(attrs->affn_strict, hash); 387929c91e99STejun Heo return hash; 388029c91e99STejun Heo } 388129c91e99STejun Heo 388229c91e99STejun Heo /* content equality test */ 388329c91e99STejun Heo static bool wqattrs_equal(const struct workqueue_attrs *a, 388429c91e99STejun Heo const struct workqueue_attrs *b) 388529c91e99STejun Heo { 388629c91e99STejun Heo if (a->nice != b->nice) 388729c91e99STejun Heo return false; 388829c91e99STejun Heo if (!cpumask_equal(a->cpumask, b->cpumask)) 388929c91e99STejun Heo return false; 38909546b29eSTejun Heo if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask)) 38919546b29eSTejun Heo return false; 38928639ecebSTejun Heo if (a->affn_strict != b->affn_strict) 38938639ecebSTejun Heo return false; 389429c91e99STejun Heo return true; 389529c91e99STejun Heo } 389629c91e99STejun Heo 38970f36ee24STejun Heo /* Update @attrs with actually available CPUs */ 38980f36ee24STejun Heo static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, 38990f36ee24STejun Heo const cpumask_t *unbound_cpumask) 39000f36ee24STejun Heo { 39010f36ee24STejun Heo /* 39020f36ee24STejun Heo * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If 39030f36ee24STejun Heo * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to 39040f36ee24STejun Heo * @unbound_cpumask. 
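 *
 * For example, with @attrs->cpumask == 0-3 and @unbound_cpumask == 2-5 the
 * effective mask becomes 2-3; if @attrs->cpumask were 6-7 instead, the
 * intersection would be empty and the effective mask would fall back to
 * the full 2-5.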
39050f36ee24STejun Heo */ 39060f36ee24STejun Heo cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); 39070f36ee24STejun Heo if (unlikely(cpumask_empty(attrs->cpumask))) 39080f36ee24STejun Heo cpumask_copy(attrs->cpumask, unbound_cpumask); 39090f36ee24STejun Heo } 39100f36ee24STejun Heo 391184193c07STejun Heo /* find wq_pod_type to use for @attrs */ 391284193c07STejun Heo static const struct wq_pod_type * 391384193c07STejun Heo wqattrs_pod_type(const struct workqueue_attrs *attrs) 391484193c07STejun Heo { 3915523a301eSTejun Heo enum wq_affn_scope scope; 3916523a301eSTejun Heo struct wq_pod_type *pt; 3917523a301eSTejun Heo 3918523a301eSTejun Heo /* to synchronize access to wq_affn_dfl */ 3919523a301eSTejun Heo lockdep_assert_held(&wq_pool_mutex); 3920523a301eSTejun Heo 3921523a301eSTejun Heo if (attrs->affn_scope == WQ_AFFN_DFL) 3922523a301eSTejun Heo scope = wq_affn_dfl; 3923523a301eSTejun Heo else 3924523a301eSTejun Heo scope = attrs->affn_scope; 3925523a301eSTejun Heo 3926523a301eSTejun Heo pt = &wq_pod_types[scope]; 392784193c07STejun Heo 392884193c07STejun Heo if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && 392984193c07STejun Heo likely(pt->nr_pods)) 393084193c07STejun Heo return pt; 393184193c07STejun Heo 393284193c07STejun Heo /* 393384193c07STejun Heo * Before workqueue_init_topology(), only SYSTEM is available which is 393484193c07STejun Heo * initialized in workqueue_init_early(). 393584193c07STejun Heo */ 393684193c07STejun Heo pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 393784193c07STejun Heo BUG_ON(!pt->nr_pods); 393884193c07STejun Heo return pt; 393984193c07STejun Heo } 394084193c07STejun Heo 39417a4e344cSTejun Heo /** 39427a4e344cSTejun Heo * init_worker_pool - initialize a newly zalloc'd worker_pool 39437a4e344cSTejun Heo * @pool: worker_pool to initialize 39447a4e344cSTejun Heo * 3945402dd89dSShailendra Verma * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 3946d185af30SYacine Belkadi * 3947d185af30SYacine Belkadi * Return: 0 on success, -errno on failure. Even on failure, all fields 394829c91e99STejun Heo * inside @pool proper are initialized and put_unbound_pool() can be called 394929c91e99STejun Heo * on @pool safely to release it. 
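 *
 * This is what lets get_unbound_pool() below keep its error handling to a
 * single path (sketch of the existing pattern):
 *
 *        if (!pool || init_worker_pool(pool) < 0)
 *                goto fail;
 *        ...
 * fail:
 *        if (pool)
 *                put_unbound_pool(pool);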
39507a4e344cSTejun Heo */ 39517a4e344cSTejun Heo static int init_worker_pool(struct worker_pool *pool) 39524e1a1f9aSTejun Heo { 3953a9b8a985SSebastian Andrzej Siewior raw_spin_lock_init(&pool->lock); 395429c91e99STejun Heo pool->id = -1; 395529c91e99STejun Heo pool->cpu = -1; 3956f3f90ad4STejun Heo pool->node = NUMA_NO_NODE; 39574e1a1f9aSTejun Heo pool->flags |= POOL_DISASSOCIATED; 395882607adcSTejun Heo pool->watchdog_ts = jiffies; 39594e1a1f9aSTejun Heo INIT_LIST_HEAD(&pool->worklist); 39604e1a1f9aSTejun Heo INIT_LIST_HEAD(&pool->idle_list); 39614e1a1f9aSTejun Heo hash_init(pool->busy_hash); 39624e1a1f9aSTejun Heo 396332a6c723SKees Cook timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 39643f959aa3SValentin Schneider INIT_WORK(&pool->idle_cull_work, idle_cull_fn); 39654e1a1f9aSTejun Heo 396632a6c723SKees Cook timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 39674e1a1f9aSTejun Heo 3968da028469SLai Jiangshan INIT_LIST_HEAD(&pool->workers); 3969e02b9312SValentin Schneider INIT_LIST_HEAD(&pool->dying_workers); 39707a4e344cSTejun Heo 39717cda9aaeSLai Jiangshan ida_init(&pool->worker_ida); 397229c91e99STejun Heo INIT_HLIST_NODE(&pool->hash_node); 397329c91e99STejun Heo pool->refcnt = 1; 397429c91e99STejun Heo 397529c91e99STejun Heo /* shouldn't fail above this point */ 3976be69d00dSThomas Gleixner pool->attrs = alloc_workqueue_attrs(); 39777a4e344cSTejun Heo if (!pool->attrs) 39787a4e344cSTejun Heo return -ENOMEM; 39795de7a03cSTejun Heo 39805de7a03cSTejun Heo wqattrs_clear_for_pool(pool->attrs); 39815de7a03cSTejun Heo 39827a4e344cSTejun Heo return 0; 39834e1a1f9aSTejun Heo } 39844e1a1f9aSTejun Heo 3985669de8bdSBart Van Assche #ifdef CONFIG_LOCKDEP 3986669de8bdSBart Van Assche static void wq_init_lockdep(struct workqueue_struct *wq) 3987669de8bdSBart Van Assche { 3988669de8bdSBart Van Assche char *lock_name; 3989669de8bdSBart Van Assche 3990669de8bdSBart Van Assche lockdep_register_key(&wq->key); 3991669de8bdSBart Van Assche lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); 3992669de8bdSBart Van Assche if (!lock_name) 3993669de8bdSBart Van Assche lock_name = wq->name; 399469a106c0SQian Cai 399569a106c0SQian Cai wq->lock_name = lock_name; 3996669de8bdSBart Van Assche lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0); 3997669de8bdSBart Van Assche } 3998669de8bdSBart Van Assche 3999669de8bdSBart Van Assche static void wq_unregister_lockdep(struct workqueue_struct *wq) 4000669de8bdSBart Van Assche { 4001669de8bdSBart Van Assche lockdep_unregister_key(&wq->key); 4002669de8bdSBart Van Assche } 4003669de8bdSBart Van Assche 4004669de8bdSBart Van Assche static void wq_free_lockdep(struct workqueue_struct *wq) 4005669de8bdSBart Van Assche { 4006669de8bdSBart Van Assche if (wq->lock_name != wq->name) 4007669de8bdSBart Van Assche kfree(wq->lock_name); 4008669de8bdSBart Van Assche } 4009669de8bdSBart Van Assche #else 4010669de8bdSBart Van Assche static void wq_init_lockdep(struct workqueue_struct *wq) 4011669de8bdSBart Van Assche { 4012669de8bdSBart Van Assche } 4013669de8bdSBart Van Assche 4014669de8bdSBart Van Assche static void wq_unregister_lockdep(struct workqueue_struct *wq) 4015669de8bdSBart Van Assche { 4016669de8bdSBart Van Assche } 4017669de8bdSBart Van Assche 4018669de8bdSBart Van Assche static void wq_free_lockdep(struct workqueue_struct *wq) 4019669de8bdSBart Van Assche { 4020669de8bdSBart Van Assche } 4021669de8bdSBart Van Assche #endif 4022669de8bdSBart Van Assche 4023e2dca7adSTejun Heo static void rcu_free_wq(struct rcu_head *rcu) 
4024e2dca7adSTejun Heo { 4025e2dca7adSTejun Heo struct workqueue_struct *wq = 4026e2dca7adSTejun Heo container_of(rcu, struct workqueue_struct, rcu); 4027e2dca7adSTejun Heo 4028669de8bdSBart Van Assche wq_free_lockdep(wq); 4029ee1ceef7STejun Heo free_percpu(wq->cpu_pwq); 4030e2dca7adSTejun Heo free_workqueue_attrs(wq->unbound_attrs); 4031e2dca7adSTejun Heo kfree(wq); 4032e2dca7adSTejun Heo } 4033e2dca7adSTejun Heo 403429c91e99STejun Heo static void rcu_free_pool(struct rcu_head *rcu) 403529c91e99STejun Heo { 403629c91e99STejun Heo struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 403729c91e99STejun Heo 40387cda9aaeSLai Jiangshan ida_destroy(&pool->worker_ida); 403929c91e99STejun Heo free_workqueue_attrs(pool->attrs); 404029c91e99STejun Heo kfree(pool); 404129c91e99STejun Heo } 404229c91e99STejun Heo 404329c91e99STejun Heo /** 404429c91e99STejun Heo * put_unbound_pool - put a worker_pool 404529c91e99STejun Heo * @pool: worker_pool to put 404629c91e99STejun Heo * 404724acfb71SThomas Gleixner * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU 4048c5aa87bbSTejun Heo * safe manner. get_unbound_pool() calls this function on its failure path 4049c5aa87bbSTejun Heo * and this function should be able to release pools which went through, 4050c5aa87bbSTejun Heo * successfully or not, init_worker_pool(). 4051a892caccSTejun Heo * 4052a892caccSTejun Heo * Should be called with wq_pool_mutex held. 405329c91e99STejun Heo */ 405429c91e99STejun Heo static void put_unbound_pool(struct worker_pool *pool) 405529c91e99STejun Heo { 405660f5a4bcSLai Jiangshan DECLARE_COMPLETION_ONSTACK(detach_completion); 405729c91e99STejun Heo struct worker *worker; 40589680540cSYang Yingliang LIST_HEAD(cull_list); 4059e02b9312SValentin Schneider 4060a892caccSTejun Heo lockdep_assert_held(&wq_pool_mutex); 4061a892caccSTejun Heo 4062a892caccSTejun Heo if (--pool->refcnt) 406329c91e99STejun Heo return; 406429c91e99STejun Heo 406529c91e99STejun Heo /* sanity checks */ 406661d0fbb4SLai Jiangshan if (WARN_ON(!(pool->cpu < 0)) || 4067a892caccSTejun Heo WARN_ON(!list_empty(&pool->worklist))) 406829c91e99STejun Heo return; 406929c91e99STejun Heo 407029c91e99STejun Heo /* release id and unhash */ 407129c91e99STejun Heo if (pool->id >= 0) 407229c91e99STejun Heo idr_remove(&worker_pool_idr, pool->id); 407329c91e99STejun Heo hash_del(&pool->hash_node); 407429c91e99STejun Heo 4075c5aa87bbSTejun Heo /* 4076692b4825STejun Heo * Become the manager and destroy all workers. This prevents 4077692b4825STejun Heo * @pool's workers from blocking on attach_mutex. We're the last 4078692b4825STejun Heo * manager and @pool gets freed with the flag set. 40799ab03be4SValentin Schneider * 40809ab03be4SValentin Schneider * Having a concurrent manager is quite unlikely to happen as we can 40819ab03be4SValentin Schneider * only get here with 40829ab03be4SValentin Schneider * pwq->refcnt == pool->refcnt == 0 40839ab03be4SValentin Schneider * which implies no work queued to the pool, which implies no worker can 40849ab03be4SValentin Schneider * become the manager. 
However a worker could have taken the role of 40859ab03be4SValentin Schneider * manager before the refcnts dropped to 0, since maybe_create_worker() 40869ab03be4SValentin Schneider * drops pool->lock 4087c5aa87bbSTejun Heo */ 40889ab03be4SValentin Schneider while (true) { 40899ab03be4SValentin Schneider rcuwait_wait_event(&manager_wait, 40909ab03be4SValentin Schneider !(pool->flags & POOL_MANAGER_ACTIVE), 4091d8bb65abSSebastian Andrzej Siewior TASK_UNINTERRUPTIBLE); 4092e02b9312SValentin Schneider 4093e02b9312SValentin Schneider mutex_lock(&wq_pool_attach_mutex); 40949ab03be4SValentin Schneider raw_spin_lock_irq(&pool->lock); 40959ab03be4SValentin Schneider if (!(pool->flags & POOL_MANAGER_ACTIVE)) { 4096692b4825STejun Heo pool->flags |= POOL_MANAGER_ACTIVE; 40979ab03be4SValentin Schneider break; 40989ab03be4SValentin Schneider } 40999ab03be4SValentin Schneider raw_spin_unlock_irq(&pool->lock); 4100e02b9312SValentin Schneider mutex_unlock(&wq_pool_attach_mutex); 41019ab03be4SValentin Schneider } 4102692b4825STejun Heo 41031037de36SLai Jiangshan while ((worker = first_idle_worker(pool))) 4104e02b9312SValentin Schneider set_worker_dying(worker, &cull_list); 410529c91e99STejun Heo WARN_ON(pool->nr_workers || pool->nr_idle); 4106a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 410760f5a4bcSLai Jiangshan 4108e02b9312SValentin Schneider wake_dying_workers(&cull_list); 4109e02b9312SValentin Schneider 4110e02b9312SValentin Schneider if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers)) 411160f5a4bcSLai Jiangshan pool->detach_completion = &detach_completion; 41121258fae7STejun Heo mutex_unlock(&wq_pool_attach_mutex); 411360f5a4bcSLai Jiangshan 411460f5a4bcSLai Jiangshan if (pool->detach_completion) 411560f5a4bcSLai Jiangshan wait_for_completion(pool->detach_completion); 411660f5a4bcSLai Jiangshan 411729c91e99STejun Heo /* shut down the timers */ 411829c91e99STejun Heo del_timer_sync(&pool->idle_timer); 41193f959aa3SValentin Schneider cancel_work_sync(&pool->idle_cull_work); 412029c91e99STejun Heo del_timer_sync(&pool->mayday_timer); 412129c91e99STejun Heo 412224acfb71SThomas Gleixner /* RCU protected to allow dereferences from get_work_pool() */ 412325b00775SPaul E. McKenney call_rcu(&pool->rcu, rcu_free_pool); 412429c91e99STejun Heo } 412529c91e99STejun Heo 412629c91e99STejun Heo /** 412729c91e99STejun Heo * get_unbound_pool - get a worker_pool with the specified attributes 412829c91e99STejun Heo * @attrs: the attributes of the worker_pool to get 412929c91e99STejun Heo * 413029c91e99STejun Heo * Obtain a worker_pool which has the same attributes as @attrs, bump the 413129c91e99STejun Heo * reference count and return it. If there already is a matching 413229c91e99STejun Heo * worker_pool, it will be used; otherwise, this function attempts to 4133d185af30SYacine Belkadi * create a new one. 4134a892caccSTejun Heo * 4135a892caccSTejun Heo * Should be called with wq_pool_mutex held. 4136d185af30SYacine Belkadi * 4137d185af30SYacine Belkadi * Return: On success, a worker_pool with the same attributes as @attrs. 4138d185af30SYacine Belkadi * On failure, %NULL. 
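 *
 * Typical call pattern, as in alloc_unbound_pwq() below (sketch):
 *
 *        lockdep_assert_held(&wq_pool_mutex);
 *        pool = get_unbound_pool(attrs);
 *        if (!pool)
 *                return NULL;
 *        ...
 *        put_unbound_pool(pool);        (only on a later failure path)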
413929c91e99STejun Heo */ 414029c91e99STejun Heo static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 414129c91e99STejun Heo { 414284193c07STejun Heo struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA]; 414329c91e99STejun Heo u32 hash = wqattrs_hash(attrs); 414429c91e99STejun Heo struct worker_pool *pool; 414584193c07STejun Heo int pod, node = NUMA_NO_NODE; 414629c91e99STejun Heo 4147a892caccSTejun Heo lockdep_assert_held(&wq_pool_mutex); 414829c91e99STejun Heo 414929c91e99STejun Heo /* do we already have a matching pool? */ 415029c91e99STejun Heo hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 415129c91e99STejun Heo if (wqattrs_equal(pool->attrs, attrs)) { 415229c91e99STejun Heo pool->refcnt++; 41533fb1823cSLai Jiangshan return pool; 415429c91e99STejun Heo } 415529c91e99STejun Heo } 415629c91e99STejun Heo 41579546b29eSTejun Heo /* If __pod_cpumask is contained inside a NUMA pod, that's our node */ 415884193c07STejun Heo for (pod = 0; pod < pt->nr_pods; pod++) { 41599546b29eSTejun Heo if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { 416084193c07STejun Heo node = pt->pod_node[pod]; 4161e2273584SXunlei Pang break; 4162e2273584SXunlei Pang } 4163e2273584SXunlei Pang } 4164e2273584SXunlei Pang 416529c91e99STejun Heo /* nope, create a new one */ 416684193c07STejun Heo pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); 416729c91e99STejun Heo if (!pool || init_worker_pool(pool) < 0) 416829c91e99STejun Heo goto fail; 416929c91e99STejun Heo 417084193c07STejun Heo pool->node = node; 41715de7a03cSTejun Heo copy_workqueue_attrs(pool->attrs, attrs); 41725de7a03cSTejun Heo wqattrs_clear_for_pool(pool->attrs); 41732865a8fbSShaohua Li 417429c91e99STejun Heo if (worker_pool_assign_id(pool) < 0) 417529c91e99STejun Heo goto fail; 417629c91e99STejun Heo 417729c91e99STejun Heo /* create and start the initial worker */ 41783347fa09STejun Heo if (wq_online && !create_worker(pool)) 417929c91e99STejun Heo goto fail; 418029c91e99STejun Heo 418129c91e99STejun Heo /* install */ 418229c91e99STejun Heo hash_add(unbound_pool_hash, &pool->hash_node, hash); 41833fb1823cSLai Jiangshan 418429c91e99STejun Heo return pool; 418529c91e99STejun Heo fail: 418629c91e99STejun Heo if (pool) 418729c91e99STejun Heo put_unbound_pool(pool); 418829c91e99STejun Heo return NULL; 418929c91e99STejun Heo } 419029c91e99STejun Heo 41918864b4e5STejun Heo static void rcu_free_pwq(struct rcu_head *rcu) 41928864b4e5STejun Heo { 41938864b4e5STejun Heo kmem_cache_free(pwq_cache, 41948864b4e5STejun Heo container_of(rcu, struct pool_workqueue, rcu)); 41958864b4e5STejun Heo } 41968864b4e5STejun Heo 41978864b4e5STejun Heo /* 4198967b494eSTejun Heo * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero 4199967b494eSTejun Heo * refcnt and needs to be destroyed. 42008864b4e5STejun Heo */ 4201687a9aa5STejun Heo static void pwq_release_workfn(struct kthread_work *work) 42028864b4e5STejun Heo { 42038864b4e5STejun Heo struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 4204687a9aa5STejun Heo release_work); 42058864b4e5STejun Heo struct workqueue_struct *wq = pwq->wq; 42068864b4e5STejun Heo struct worker_pool *pool = pwq->pool; 4207b42b0bddSYang Yingliang bool is_last = false; 42088864b4e5STejun Heo 4209b42b0bddSYang Yingliang /* 4210687a9aa5STejun Heo * When @pwq is not linked, it doesn't hold any reference to the 4211b42b0bddSYang Yingliang * @wq, and @wq is invalid to access. 
4212b42b0bddSYang Yingliang */ 4213b42b0bddSYang Yingliang if (!list_empty(&pwq->pwqs_node)) { 42143c25a55dSLai Jiangshan mutex_lock(&wq->mutex); 42158864b4e5STejun Heo list_del_rcu(&pwq->pwqs_node); 4216bc0caf09STejun Heo is_last = list_empty(&wq->pwqs); 42173c25a55dSLai Jiangshan mutex_unlock(&wq->mutex); 4218b42b0bddSYang Yingliang } 42198864b4e5STejun Heo 4220687a9aa5STejun Heo if (wq->flags & WQ_UNBOUND) { 4221a892caccSTejun Heo mutex_lock(&wq_pool_mutex); 42228864b4e5STejun Heo put_unbound_pool(pool); 4223a892caccSTejun Heo mutex_unlock(&wq_pool_mutex); 4224687a9aa5STejun Heo } 4225a892caccSTejun Heo 422625b00775SPaul E. McKenney call_rcu(&pwq->rcu, rcu_free_pwq); 42278864b4e5STejun Heo 42288864b4e5STejun Heo /* 42298864b4e5STejun Heo * If we're the last pwq going away, @wq is already dead and no one 4230e2dca7adSTejun Heo * is gonna access it anymore. Schedule RCU free. 42318864b4e5STejun Heo */ 4232669de8bdSBart Van Assche if (is_last) { 4233669de8bdSBart Van Assche wq_unregister_lockdep(wq); 423425b00775SPaul E. McKenney call_rcu(&wq->rcu, rcu_free_wq); 42356029a918STejun Heo } 4236669de8bdSBart Van Assche } 42378864b4e5STejun Heo 423867dc8325SCai Huoqing /* initialize newly allocated @pwq which is associated with @wq and @pool */ 4239f147f29eSTejun Heo static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 4240f147f29eSTejun Heo struct worker_pool *pool) 4241d2c1d404STejun Heo { 4242d2c1d404STejun Heo BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 4243d2c1d404STejun Heo 4244e50aba9aSTejun Heo memset(pwq, 0, sizeof(*pwq)); 4245e50aba9aSTejun Heo 4246d2c1d404STejun Heo pwq->pool = pool; 4247d2c1d404STejun Heo pwq->wq = wq; 4248d2c1d404STejun Heo pwq->flush_color = -1; 42498864b4e5STejun Heo pwq->refcnt = 1; 4250f97a4a1aSLai Jiangshan INIT_LIST_HEAD(&pwq->inactive_works); 42511befcf30STejun Heo INIT_LIST_HEAD(&pwq->pwqs_node); 4252d2c1d404STejun Heo INIT_LIST_HEAD(&pwq->mayday_node); 4253687a9aa5STejun Heo kthread_init_work(&pwq->release_work, pwq_release_workfn); 4254f147f29eSTejun Heo } 4255d2c1d404STejun Heo 4256f147f29eSTejun Heo /* sync @pwq with the current state of its associated wq and link it */ 42571befcf30STejun Heo static void link_pwq(struct pool_workqueue *pwq) 4258f147f29eSTejun Heo { 4259f147f29eSTejun Heo struct workqueue_struct *wq = pwq->wq; 4260f147f29eSTejun Heo 4261f147f29eSTejun Heo lockdep_assert_held(&wq->mutex); 426275ccf595STejun Heo 42631befcf30STejun Heo /* may be called multiple times, ignore if already linked */ 42641befcf30STejun Heo if (!list_empty(&pwq->pwqs_node)) 42651befcf30STejun Heo return; 42661befcf30STejun Heo 426729b1cb41SLai Jiangshan /* set the matching work_color */ 426875ccf595STejun Heo pwq->work_color = wq->work_color; 4269983ca25eSTejun Heo 4270983ca25eSTejun Heo /* link in @pwq */ 42719e8cd2f5STejun Heo list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 4272df2d5ae4STejun Heo } 42736029a918STejun Heo 4274f147f29eSTejun Heo /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 4275f147f29eSTejun Heo static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 4276f147f29eSTejun Heo const struct workqueue_attrs *attrs) 4277f147f29eSTejun Heo { 4278f147f29eSTejun Heo struct worker_pool *pool; 4279f147f29eSTejun Heo struct pool_workqueue *pwq; 4280f147f29eSTejun Heo 4281f147f29eSTejun Heo lockdep_assert_held(&wq_pool_mutex); 4282f147f29eSTejun Heo 4283f147f29eSTejun Heo pool = get_unbound_pool(attrs); 4284f147f29eSTejun Heo if (!pool) 4285f147f29eSTejun Heo return NULL; 
4286f147f29eSTejun Heo 4287e50aba9aSTejun Heo pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 4288f147f29eSTejun Heo if (!pwq) { 4289f147f29eSTejun Heo put_unbound_pool(pool); 4290f147f29eSTejun Heo return NULL; 4291f147f29eSTejun Heo } 4292f147f29eSTejun Heo 4293f147f29eSTejun Heo init_pwq(pwq, wq, pool); 4294f147f29eSTejun Heo return pwq; 4295d2c1d404STejun Heo } 4296d2c1d404STejun Heo 42974c16bd32STejun Heo /** 4298fef59c9cSTejun Heo * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod 4299042f7df1SLai Jiangshan * @attrs: the wq_attrs of the default pwq of the target workqueue 430084193c07STejun Heo * @cpu: the target CPU 43014c16bd32STejun Heo * @cpu_going_down: if >= 0, the CPU to consider as offline 43024c16bd32STejun Heo * 4303fef59c9cSTejun Heo * Calculate the cpumask a workqueue with @attrs should use on @pod. If 4304fef59c9cSTejun Heo * @cpu_going_down is >= 0, that cpu is considered offline during calculation. 43059546b29eSTejun Heo * The result is stored in @attrs->__pod_cpumask. 43064c16bd32STejun Heo * 4307fef59c9cSTejun Heo * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled 4308fef59c9cSTejun Heo * and @pod has online CPUs requested by @attrs, the returned cpumask is the 4309fef59c9cSTejun Heo * intersection of the possible CPUs of @pod and @attrs->cpumask. 43104c16bd32STejun Heo * 4311fef59c9cSTejun Heo * The caller is responsible for ensuring that the cpumask of @pod stays stable. 43124c16bd32STejun Heo */ 43139546b29eSTejun Heo static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, 43149546b29eSTejun Heo int cpu_going_down) 43154c16bd32STejun Heo { 431684193c07STejun Heo const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 431784193c07STejun Heo int pod = pt->cpu_pod[cpu]; 43184c16bd32STejun Heo 4319fef59c9cSTejun Heo /* does @pod have any online CPUs @attrs wants? 
*/ 43209546b29eSTejun Heo cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); 43219546b29eSTejun Heo cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask); 43224c16bd32STejun Heo if (cpu_going_down >= 0) 43239546b29eSTejun Heo cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask); 43244c16bd32STejun Heo 43259546b29eSTejun Heo if (cpumask_empty(attrs->__pod_cpumask)) { 43269546b29eSTejun Heo cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); 432784193c07STejun Heo return; 432884193c07STejun Heo } 43294c16bd32STejun Heo 4330fef59c9cSTejun Heo /* yeap, return possible CPUs in @pod that @attrs wants */ 43319546b29eSTejun Heo cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]); 43321ad0f0a7SMichael Bringmann 43339546b29eSTejun Heo if (cpumask_empty(attrs->__pod_cpumask)) 43341ad0f0a7SMichael Bringmann pr_warn_once("WARNING: workqueue cpumask: online intersect > " 43351ad0f0a7SMichael Bringmann "possible intersect\n"); 43364c16bd32STejun Heo } 43374c16bd32STejun Heo 4338bd31fb92STejun Heo /* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */ 4339636b927eSTejun Heo static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, 4340636b927eSTejun Heo int cpu, struct pool_workqueue *pwq) 43411befcf30STejun Heo { 4342bd31fb92STejun Heo struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu); 43431befcf30STejun Heo struct pool_workqueue *old_pwq; 43441befcf30STejun Heo 43455b95e1afSLai Jiangshan lockdep_assert_held(&wq_pool_mutex); 43461befcf30STejun Heo lockdep_assert_held(&wq->mutex); 43471befcf30STejun Heo 43481befcf30STejun Heo /* link_pwq() can handle duplicate calls */ 43491befcf30STejun Heo link_pwq(pwq); 43501befcf30STejun Heo 4351bd31fb92STejun Heo old_pwq = rcu_access_pointer(*slot); 4352bd31fb92STejun Heo rcu_assign_pointer(*slot, pwq); 43531befcf30STejun Heo return old_pwq; 43541befcf30STejun Heo } 43551befcf30STejun Heo 43562d5f0764SLai Jiangshan /* context to store the prepared attrs & pwqs before applying */ 43572d5f0764SLai Jiangshan struct apply_wqattrs_ctx { 43582d5f0764SLai Jiangshan struct workqueue_struct *wq; /* target workqueue */ 43592d5f0764SLai Jiangshan struct workqueue_attrs *attrs; /* attrs to apply */ 4360042f7df1SLai Jiangshan struct list_head list; /* queued for batching commit */ 43612d5f0764SLai Jiangshan struct pool_workqueue *dfl_pwq; 43622d5f0764SLai Jiangshan struct pool_workqueue *pwq_tbl[]; 43632d5f0764SLai Jiangshan }; 43642d5f0764SLai Jiangshan 43652d5f0764SLai Jiangshan /* free the resources after success or abort */ 43662d5f0764SLai Jiangshan static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 43672d5f0764SLai Jiangshan { 43682d5f0764SLai Jiangshan if (ctx) { 4369636b927eSTejun Heo int cpu; 43702d5f0764SLai Jiangshan 4371636b927eSTejun Heo for_each_possible_cpu(cpu) 4372636b927eSTejun Heo put_pwq_unlocked(ctx->pwq_tbl[cpu]); 43732d5f0764SLai Jiangshan put_pwq_unlocked(ctx->dfl_pwq); 43742d5f0764SLai Jiangshan 43752d5f0764SLai Jiangshan free_workqueue_attrs(ctx->attrs); 43762d5f0764SLai Jiangshan 43772d5f0764SLai Jiangshan kfree(ctx); 43782d5f0764SLai Jiangshan } 43792d5f0764SLai Jiangshan } 43802d5f0764SLai Jiangshan 43812d5f0764SLai Jiangshan /* allocate the attrs and pwqs for later installation */ 43822d5f0764SLai Jiangshan static struct apply_wqattrs_ctx * 43832d5f0764SLai Jiangshan apply_wqattrs_prepare(struct workqueue_struct *wq, 438499c621efSLai Jiangshan const struct workqueue_attrs *attrs, 438599c621efSLai Jiangshan const cpumask_var_t 
unbound_cpumask) 43862d5f0764SLai Jiangshan { 43872d5f0764SLai Jiangshan struct apply_wqattrs_ctx *ctx; 43889546b29eSTejun Heo struct workqueue_attrs *new_attrs; 4389636b927eSTejun Heo int cpu; 43902d5f0764SLai Jiangshan 43912d5f0764SLai Jiangshan lockdep_assert_held(&wq_pool_mutex); 43922d5f0764SLai Jiangshan 439384193c07STejun Heo if (WARN_ON(attrs->affn_scope < 0 || 439484193c07STejun Heo attrs->affn_scope >= WQ_AFFN_NR_TYPES)) 439584193c07STejun Heo return ERR_PTR(-EINVAL); 439684193c07STejun Heo 4397636b927eSTejun Heo ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL); 43982d5f0764SLai Jiangshan 4399be69d00dSThomas Gleixner new_attrs = alloc_workqueue_attrs(); 44009546b29eSTejun Heo if (!ctx || !new_attrs) 44012d5f0764SLai Jiangshan goto out_free; 44022d5f0764SLai Jiangshan 4403042f7df1SLai Jiangshan /* 44042d5f0764SLai Jiangshan * If something goes wrong during CPU up/down, we'll fall back to 44052d5f0764SLai Jiangshan * the default pwq covering whole @attrs->cpumask. Always create 44062d5f0764SLai Jiangshan * it even if we don't use it immediately. 44072d5f0764SLai Jiangshan */ 44080f36ee24STejun Heo copy_workqueue_attrs(new_attrs, attrs); 44090f36ee24STejun Heo wqattrs_actualize_cpumask(new_attrs, unbound_cpumask); 44109546b29eSTejun Heo cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 44112d5f0764SLai Jiangshan ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 44122d5f0764SLai Jiangshan if (!ctx->dfl_pwq) 44132d5f0764SLai Jiangshan goto out_free; 44142d5f0764SLai Jiangshan 4415636b927eSTejun Heo for_each_possible_cpu(cpu) { 4416af73f5c9STejun Heo if (new_attrs->ordered) { 44172d5f0764SLai Jiangshan ctx->dfl_pwq->refcnt++; 4418636b927eSTejun Heo ctx->pwq_tbl[cpu] = ctx->dfl_pwq; 4419636b927eSTejun Heo } else { 44209546b29eSTejun Heo wq_calc_pod_cpumask(new_attrs, cpu, -1); 44219546b29eSTejun Heo ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs); 4422636b927eSTejun Heo if (!ctx->pwq_tbl[cpu]) 4423636b927eSTejun Heo goto out_free; 44242d5f0764SLai Jiangshan } 44252d5f0764SLai Jiangshan } 44262d5f0764SLai Jiangshan 4427042f7df1SLai Jiangshan /* save the user configured attrs and sanitize it. 
*/ 4428042f7df1SLai Jiangshan copy_workqueue_attrs(new_attrs, attrs); 4429042f7df1SLai Jiangshan cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); 44309546b29eSTejun Heo cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 44312d5f0764SLai Jiangshan ctx->attrs = new_attrs; 4432042f7df1SLai Jiangshan 44332d5f0764SLai Jiangshan ctx->wq = wq; 44342d5f0764SLai Jiangshan return ctx; 44352d5f0764SLai Jiangshan 44362d5f0764SLai Jiangshan out_free: 44372d5f0764SLai Jiangshan free_workqueue_attrs(new_attrs); 44382d5f0764SLai Jiangshan apply_wqattrs_cleanup(ctx); 443984193c07STejun Heo return ERR_PTR(-ENOMEM); 44402d5f0764SLai Jiangshan } 44412d5f0764SLai Jiangshan 44422d5f0764SLai Jiangshan /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */ 44432d5f0764SLai Jiangshan static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) 44442d5f0764SLai Jiangshan { 4445636b927eSTejun Heo int cpu; 44462d5f0764SLai Jiangshan 44472d5f0764SLai Jiangshan /* all pwqs have been created successfully, let's install'em */ 44482d5f0764SLai Jiangshan mutex_lock(&ctx->wq->mutex); 44492d5f0764SLai Jiangshan 44502d5f0764SLai Jiangshan copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); 44512d5f0764SLai Jiangshan 4452bd31fb92STejun Heo /* save the previous pwqs and install the new ones */ 4453636b927eSTejun Heo for_each_possible_cpu(cpu) 4454636b927eSTejun Heo ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu, 4455636b927eSTejun Heo ctx->pwq_tbl[cpu]); 4456bd31fb92STejun Heo ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq); 44572d5f0764SLai Jiangshan 44582d5f0764SLai Jiangshan mutex_unlock(&ctx->wq->mutex); 44592d5f0764SLai Jiangshan } 44602d5f0764SLai Jiangshan 4461a0111cf6SLai Jiangshan static void apply_wqattrs_lock(void) 4462a0111cf6SLai Jiangshan { 4463a0111cf6SLai Jiangshan /* CPUs should stay stable across pwq creations and installations */ 4464ffd8bea8SSebastian Andrzej Siewior cpus_read_lock(); 4465a0111cf6SLai Jiangshan mutex_lock(&wq_pool_mutex); 4466a0111cf6SLai Jiangshan } 4467a0111cf6SLai Jiangshan 4468a0111cf6SLai Jiangshan static void apply_wqattrs_unlock(void) 4469a0111cf6SLai Jiangshan { 4470a0111cf6SLai Jiangshan mutex_unlock(&wq_pool_mutex); 4471ffd8bea8SSebastian Andrzej Siewior cpus_read_unlock(); 4472a0111cf6SLai Jiangshan } 4473a0111cf6SLai Jiangshan 4474a0111cf6SLai Jiangshan static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, 4475a0111cf6SLai Jiangshan const struct workqueue_attrs *attrs) 4476a0111cf6SLai Jiangshan { 4477a0111cf6SLai Jiangshan struct apply_wqattrs_ctx *ctx; 4478a0111cf6SLai Jiangshan 4479a0111cf6SLai Jiangshan /* only unbound workqueues can change attributes */ 4480a0111cf6SLai Jiangshan if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 4481a0111cf6SLai Jiangshan return -EINVAL; 4482a0111cf6SLai Jiangshan 4483a0111cf6SLai Jiangshan /* creating multiple pwqs breaks ordering guarantee */ 44840a94efb5STejun Heo if (!list_empty(&wq->pwqs)) { 44850a94efb5STejun Heo if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 4486a0111cf6SLai Jiangshan return -EINVAL; 4487a0111cf6SLai Jiangshan 44880a94efb5STejun Heo wq->flags &= ~__WQ_ORDERED; 44890a94efb5STejun Heo } 44900a94efb5STejun Heo 449199c621efSLai Jiangshan ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); 449284193c07STejun Heo if (IS_ERR(ctx)) 449384193c07STejun Heo return PTR_ERR(ctx); 4494a0111cf6SLai Jiangshan 4495a0111cf6SLai Jiangshan /* the ctx has been prepared successfully, let's commit it */ 4496a0111cf6SLai Jiangshan 
apply_wqattrs_commit(ctx); 4497a0111cf6SLai Jiangshan apply_wqattrs_cleanup(ctx); 4498a0111cf6SLai Jiangshan 44996201171eSwanghaibin return 0; 4500a0111cf6SLai Jiangshan } 4501a0111cf6SLai Jiangshan 45029e8cd2f5STejun Heo /** 45039e8cd2f5STejun Heo * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 45049e8cd2f5STejun Heo * @wq: the target workqueue 45059e8cd2f5STejun Heo * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 45069e8cd2f5STejun Heo * 4507fef59c9cSTejun Heo * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps 4508fef59c9cSTejun Heo * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that 4509fef59c9cSTejun Heo * work items are affine to the pod they were issued on. Older pwqs are released as 4510fef59c9cSTejun Heo * in-flight work items finish. Note that a work item which repeatedly requeues 4511fef59c9cSTejun Heo * itself back-to-back will stay on its current pwq. 45129e8cd2f5STejun Heo * 4513d185af30SYacine Belkadi * Performs GFP_KERNEL allocations. 4514d185af30SYacine Belkadi * 4515ffd8bea8SSebastian Andrzej Siewior * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock(). 4516509b3204SDaniel Jordan * 4517d185af30SYacine Belkadi * Return: 0 on success and -errno on failure. 45189e8cd2f5STejun Heo */ 4519513c98d0SDaniel Jordan int apply_workqueue_attrs(struct workqueue_struct *wq, 45209e8cd2f5STejun Heo const struct workqueue_attrs *attrs) 45219e8cd2f5STejun Heo { 4522a0111cf6SLai Jiangshan int ret; 45239e8cd2f5STejun Heo 4524509b3204SDaniel Jordan lockdep_assert_cpus_held(); 4525509b3204SDaniel Jordan 4526509b3204SDaniel Jordan mutex_lock(&wq_pool_mutex); 4527a0111cf6SLai Jiangshan ret = apply_workqueue_attrs_locked(wq, attrs); 4528509b3204SDaniel Jordan mutex_unlock(&wq_pool_mutex); 45292d5f0764SLai Jiangshan 45302d5f0764SLai Jiangshan return ret; 45319e8cd2f5STejun Heo } 45329e8cd2f5STejun Heo 45334c16bd32STejun Heo /** 4534fef59c9cSTejun Heo * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug 45354c16bd32STejun Heo * @wq: the target workqueue 45364cbfd3deSTejun Heo * @cpu: the CPU to update pool association for 45374cbfd3deSTejun Heo * @hotplug_cpu: the CPU coming up or going down 45384c16bd32STejun Heo * @online: whether @cpu is coming up or going down 45394c16bd32STejun Heo * 45404c16bd32STejun Heo * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 4541fef59c9cSTejun Heo * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of 45424c16bd32STejun Heo * @wq accordingly. 45434c16bd32STejun Heo * 45444c16bd32STejun Heo * 4545fef59c9cSTejun Heo * If pod affinity can't be adjusted due to memory allocation failure, it falls 4546fef59c9cSTejun Heo * back to @wq->dfl_pwq which may not be optimal but is always correct. 4547fef59c9cSTejun Heo * 4548fef59c9cSTejun Heo * Note that when the last allowed CPU of a pod goes offline for a workqueue 4549fef59c9cSTejun Heo * with a cpumask spanning multiple pods, the workers which were already 4550fef59c9cSTejun Heo * executing the work items for the workqueue will lose their CPU affinity and 4551fef59c9cSTejun Heo * may execute on any CPU. This is similar to how per-cpu workqueues behave on 4552fef59c9cSTejun Heo * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's 4553fef59c9cSTejun Heo * responsibility to flush the work item from CPU_DOWN_PREPARE.
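 *
 * Illustrative scenario (a sketch added for clarity, not part of the original
 * text): with CPU pods {0-3} and {4-7} and a workqueue cpumask of 0-7, an
 * unbound workqueue normally gets one pwq per pod.  If CPUs 4-7 all go
 * offline, the pwq serving that pod is recomputed against the full cpumask,
 * and work items already running there may finish on any online CPU, as
 * described above.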
45544c16bd32STejun Heo */ 4555fef59c9cSTejun Heo static void wq_update_pod(struct workqueue_struct *wq, int cpu, 45564cbfd3deSTejun Heo int hotplug_cpu, bool online) 45574c16bd32STejun Heo { 45584cbfd3deSTejun Heo int off_cpu = online ? -1 : hotplug_cpu; 45594c16bd32STejun Heo struct pool_workqueue *old_pwq = NULL, *pwq; 45604c16bd32STejun Heo struct workqueue_attrs *target_attrs; 45614c16bd32STejun Heo 45624c16bd32STejun Heo lockdep_assert_held(&wq_pool_mutex); 45634c16bd32STejun Heo 456484193c07STejun Heo if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered) 45654c16bd32STejun Heo return; 45664c16bd32STejun Heo 45674c16bd32STejun Heo /* 45684c16bd32STejun Heo * We don't wanna alloc/free wq_attrs for each wq for each CPU. 45694c16bd32STejun Heo * Let's use a preallocated one. The following buf is protected by 45704c16bd32STejun Heo * CPU hotplug exclusion. 45714c16bd32STejun Heo */ 4572fef59c9cSTejun Heo target_attrs = wq_update_pod_attrs_buf; 45734c16bd32STejun Heo 45744c16bd32STejun Heo copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 45750f36ee24STejun Heo wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask); 45764c16bd32STejun Heo 4577636b927eSTejun Heo /* nothing to do if the target cpumask matches the current pwq */ 45789546b29eSTejun Heo wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); 4579bd31fb92STejun Heo if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs)) 4580f7142ed4SLai Jiangshan return; 45814c16bd32STejun Heo 45824c16bd32STejun Heo /* create a new pwq */ 45834c16bd32STejun Heo pwq = alloc_unbound_pwq(wq, target_attrs); 45844c16bd32STejun Heo if (!pwq) { 4585fef59c9cSTejun Heo pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n", 45864c16bd32STejun Heo wq->name); 458777f300b1SDaeseok Youn goto use_dfl_pwq; 45884c16bd32STejun Heo } 45894c16bd32STejun Heo 4590f7142ed4SLai Jiangshan /* Install the new pwq. 
*/ 45914c16bd32STejun Heo mutex_lock(&wq->mutex); 4592636b927eSTejun Heo old_pwq = install_unbound_pwq(wq, cpu, pwq); 45934c16bd32STejun Heo goto out_unlock; 45944c16bd32STejun Heo 45954c16bd32STejun Heo use_dfl_pwq: 4596f7142ed4SLai Jiangshan mutex_lock(&wq->mutex); 4597bd31fb92STejun Heo pwq = unbound_pwq(wq, -1); 4598bd31fb92STejun Heo raw_spin_lock_irq(&pwq->pool->lock); 4599bd31fb92STejun Heo get_pwq(pwq); 4600bd31fb92STejun Heo raw_spin_unlock_irq(&pwq->pool->lock); 4601bd31fb92STejun Heo old_pwq = install_unbound_pwq(wq, cpu, pwq); 46024c16bd32STejun Heo out_unlock: 46034c16bd32STejun Heo mutex_unlock(&wq->mutex); 46044c16bd32STejun Heo put_pwq_unlocked(old_pwq); 46054c16bd32STejun Heo } 46064c16bd32STejun Heo 460730cdf249STejun Heo static int alloc_and_link_pwqs(struct workqueue_struct *wq) 46081da177e4SLinus Torvalds { 460949e3cf44STejun Heo bool highpri = wq->flags & WQ_HIGHPRI; 46108a2b7538STejun Heo int cpu, ret; 4611e1d8aa9fSFrederic Weisbecker 4612687a9aa5STejun Heo wq->cpu_pwq = alloc_percpu(struct pool_workqueue *); 4613ee1ceef7STejun Heo if (!wq->cpu_pwq) 4614687a9aa5STejun Heo goto enomem; 461530cdf249STejun Heo 4616636b927eSTejun Heo if (!(wq->flags & WQ_UNBOUND)) { 461730cdf249STejun Heo for_each_possible_cpu(cpu) { 4618687a9aa5STejun Heo struct pool_workqueue **pwq_p = 4619ee1ceef7STejun Heo per_cpu_ptr(wq->cpu_pwq, cpu); 4620687a9aa5STejun Heo struct worker_pool *pool = 4621687a9aa5STejun Heo &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]); 462230cdf249STejun Heo 4623687a9aa5STejun Heo *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, 4624687a9aa5STejun Heo pool->node); 4625687a9aa5STejun Heo if (!*pwq_p) 4626687a9aa5STejun Heo goto enomem; 4627687a9aa5STejun Heo 4628687a9aa5STejun Heo init_pwq(*pwq_p, wq, pool); 4629f147f29eSTejun Heo 4630f147f29eSTejun Heo mutex_lock(&wq->mutex); 4631687a9aa5STejun Heo link_pwq(*pwq_p); 4632f147f29eSTejun Heo mutex_unlock(&wq->mutex); 463330cdf249STejun Heo } 463430cdf249STejun Heo return 0; 4635509b3204SDaniel Jordan } 4636509b3204SDaniel Jordan 4637ffd8bea8SSebastian Andrzej Siewior cpus_read_lock(); 4638509b3204SDaniel Jordan if (wq->flags & __WQ_ORDERED) { 4639bd31fb92STejun Heo struct pool_workqueue *dfl_pwq; 4640bd31fb92STejun Heo 46418a2b7538STejun Heo ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 46428a2b7538STejun Heo /* there should only be a single pwq for the ordering guarantee */ 4643bd31fb92STejun Heo dfl_pwq = rcu_access_pointer(wq->dfl_pwq); 4644bd31fb92STejun Heo WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node || 4645bd31fb92STejun Heo wq->pwqs.prev != &dfl_pwq->pwqs_node), 46468a2b7538STejun Heo "ordering guarantee broken for workqueue %s\n", wq->name); 46479e8cd2f5STejun Heo } else { 4648509b3204SDaniel Jordan ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 46499e8cd2f5STejun Heo } 4650ffd8bea8SSebastian Andrzej Siewior cpus_read_unlock(); 4651509b3204SDaniel Jordan 465264344553SZqiang /* For unbound pwqs, flushing pwq_release_worker ensures that the 465364344553SZqiang * pwq_release_workfn() completes before calling kfree(wq).
465464344553SZqiang */ 465564344553SZqiang if (ret) 465664344553SZqiang kthread_flush_worker(pwq_release_worker); 465764344553SZqiang 4658509b3204SDaniel Jordan return ret; 4659687a9aa5STejun Heo 4660687a9aa5STejun Heo enomem: 4661687a9aa5STejun Heo if (wq->cpu_pwq) { 46627b42f401SZqiang for_each_possible_cpu(cpu) { 46637b42f401SZqiang struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 46647b42f401SZqiang 46657b42f401SZqiang if (pwq) 46667b42f401SZqiang kmem_cache_free(pwq_cache, pwq); 46677b42f401SZqiang } 4668687a9aa5STejun Heo free_percpu(wq->cpu_pwq); 4669687a9aa5STejun Heo wq->cpu_pwq = NULL; 4670687a9aa5STejun Heo } 4671687a9aa5STejun Heo return -ENOMEM; 46720f900049STejun Heo } 46730f900049STejun Heo 4674f3421797STejun Heo static int wq_clamp_max_active(int max_active, unsigned int flags, 4675f3421797STejun Heo const char *name) 4676b71ab8c2STejun Heo { 4677636b927eSTejun Heo if (max_active < 1 || max_active > WQ_MAX_ACTIVE) 4678044c782cSValentin Ilie pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 4679636b927eSTejun Heo max_active, name, 1, WQ_MAX_ACTIVE); 4680b71ab8c2STejun Heo 4681636b927eSTejun Heo return clamp_val(max_active, 1, WQ_MAX_ACTIVE); 4682b71ab8c2STejun Heo } 4683b71ab8c2STejun Heo 4684983c7515STejun Heo /* 4685983c7515STejun Heo * Workqueues which may be used during memory reclaim should have a rescuer 4686983c7515STejun Heo * to guarantee forward progress. 4687983c7515STejun Heo */ 4688983c7515STejun Heo static int init_rescuer(struct workqueue_struct *wq) 4689983c7515STejun Heo { 4690983c7515STejun Heo struct worker *rescuer; 4691b92b36eaSDan Carpenter int ret; 4692983c7515STejun Heo 4693983c7515STejun Heo if (!(wq->flags & WQ_MEM_RECLAIM)) 4694983c7515STejun Heo return 0; 4695983c7515STejun Heo 4696983c7515STejun Heo rescuer = alloc_worker(NUMA_NO_NODE); 46974c0736a7SPetr Mladek if (!rescuer) { 46984c0736a7SPetr Mladek pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n", 46994c0736a7SPetr Mladek wq->name); 4700983c7515STejun Heo return -ENOMEM; 47014c0736a7SPetr Mladek } 4702983c7515STejun Heo 4703983c7515STejun Heo rescuer->rescue_wq = wq; 4704b6a46f72SAaron Tomlin rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name); 4705f187b697SSean Fu if (IS_ERR(rescuer->task)) { 4706b92b36eaSDan Carpenter ret = PTR_ERR(rescuer->task); 47074c0736a7SPetr Mladek pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe", 47084c0736a7SPetr Mladek wq->name, ERR_PTR(ret)); 4709983c7515STejun Heo kfree(rescuer); 4710b92b36eaSDan Carpenter return ret; 4711983c7515STejun Heo } 4712983c7515STejun Heo 4713983c7515STejun Heo wq->rescuer = rescuer; 4714983c7515STejun Heo kthread_bind_mask(rescuer->task, cpu_possible_mask); 4715983c7515STejun Heo wake_up_process(rescuer->task); 4716983c7515STejun Heo 4717983c7515STejun Heo return 0; 4718983c7515STejun Heo } 4719983c7515STejun Heo 472082e098f5STejun Heo /** 472182e098f5STejun Heo * wq_adjust_max_active - update a wq's max_active to the current setting 472282e098f5STejun Heo * @wq: target workqueue 472382e098f5STejun Heo * 472482e098f5STejun Heo * If @wq isn't freezing, set @wq->max_active to the saved_max_active and 472582e098f5STejun Heo * activate inactive work items accordingly. If @wq is freezing, clear 472682e098f5STejun Heo * @wq->max_active to zero. 
472782e098f5STejun Heo */ 472882e098f5STejun Heo static void wq_adjust_max_active(struct workqueue_struct *wq) 472982e098f5STejun Heo { 47305f99fee6STejun Heo bool activated; 473182e098f5STejun Heo 473282e098f5STejun Heo lockdep_assert_held(&wq->mutex); 473382e098f5STejun Heo 473482e098f5STejun Heo if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) { 47356741dd3fSGreg Kroah-Hartman WRITE_ONCE(wq->max_active, 0); 47366741dd3fSGreg Kroah-Hartman return; 473782e098f5STejun Heo } 473882e098f5STejun Heo 47396741dd3fSGreg Kroah-Hartman if (wq->max_active == wq->saved_max_active) 474082e098f5STejun Heo return; 474182e098f5STejun Heo 474282e098f5STejun Heo /* 47436741dd3fSGreg Kroah-Hartman * Update @wq->max_active and then kick inactive work items if more 474482e098f5STejun Heo * active work items are allowed. This doesn't break work item ordering 474582e098f5STejun Heo * because new work items are always queued behind existing inactive 474682e098f5STejun Heo * work items if there are any. 474782e098f5STejun Heo */ 47486741dd3fSGreg Kroah-Hartman WRITE_ONCE(wq->max_active, wq->saved_max_active); 474982e098f5STejun Heo 47505f99fee6STejun Heo /* 47515f99fee6STejun Heo * Round-robin through pwq's activating the first inactive work item 47525f99fee6STejun Heo * until max_active is filled. 47535f99fee6STejun Heo */ 47545f99fee6STejun Heo do { 47555f99fee6STejun Heo struct pool_workqueue *pwq; 47565f99fee6STejun Heo 47575f99fee6STejun Heo activated = false; 475882e098f5STejun Heo for_each_pwq(pwq, wq) { 475982e098f5STejun Heo unsigned long flags; 476082e098f5STejun Heo 47615f99fee6STejun Heo /* can be called during early boot w/ irq disabled */ 476282e098f5STejun Heo raw_spin_lock_irqsave(&pwq->pool->lock, flags); 47636741dd3fSGreg Kroah-Hartman if (pwq_activate_first_inactive(pwq)) { 47645f99fee6STejun Heo activated = true; 476582e098f5STejun Heo kick_pool(pwq->pool); 47665f99fee6STejun Heo } 476782e098f5STejun Heo raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 476882e098f5STejun Heo } 47695f99fee6STejun Heo } while (activated); 477082e098f5STejun Heo } 477182e098f5STejun Heo 4772a2775bbcSMathieu Malaterre __printf(1, 4) 4773669de8bdSBart Van Assche struct workqueue_struct *alloc_workqueue(const char *fmt, 477497e37d7bSTejun Heo unsigned int flags, 4775669de8bdSBart Van Assche int max_active, ...) 47763af24433SOleg Nesterov { 4777ecf6881fSTejun Heo va_list args; 47783af24433SOleg Nesterov struct workqueue_struct *wq; 4779bfb429f3SGreg Kroah-Hartman int len; 4780b196be89STejun Heo 47815c0338c6STejun Heo /* 4782fef59c9cSTejun Heo * Unbound && max_active == 1 used to imply ordered, which is no longer 4783fef59c9cSTejun Heo * the case on many machines due to per-pod pools. While 47845c0338c6STejun Heo * alloc_ordered_workqueue() is the right way to create an ordered 4785fef59c9cSTejun Heo * workqueue, keep the previous behavior to avoid subtle breakages. 
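 *
 * In other words (an illustrative sketch, not part of the original comment):
 *
 *	wq = alloc_workqueue("foo", WQ_UNBOUND, 1);	// still implies ordered
 *	wq = alloc_ordered_workqueue("foo", 0);		// the preferred spelling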
47865c0338c6STejun Heo */ 47875c0338c6STejun Heo if ((flags & WQ_UNBOUND) && max_active == 1) 47885c0338c6STejun Heo flags |= __WQ_ORDERED; 47895c0338c6STejun Heo 4790cee22a15SViresh Kumar /* see the comment above the definition of WQ_POWER_EFFICIENT */ 4791cee22a15SViresh Kumar if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 4792cee22a15SViresh Kumar flags |= WQ_UNBOUND; 4793cee22a15SViresh Kumar 4794ecf6881fSTejun Heo /* allocate wq and format name */ 4795bfb429f3SGreg Kroah-Hartman wq = kzalloc(sizeof(*wq), GFP_KERNEL); 4796b196be89STejun Heo if (!wq) 4797d2c1d404STejun Heo return NULL; 4798b196be89STejun Heo 47996029a918STejun Heo if (flags & WQ_UNBOUND) { 4800be69d00dSThomas Gleixner wq->unbound_attrs = alloc_workqueue_attrs(); 48016029a918STejun Heo if (!wq->unbound_attrs) 48026029a918STejun Heo goto err_free_wq; 48036029a918STejun Heo } 48046029a918STejun Heo 4805669de8bdSBart Van Assche va_start(args, max_active); 4806bfb429f3SGreg Kroah-Hartman len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); 4807b196be89STejun Heo va_end(args); 48083af24433SOleg Nesterov 4809bfb429f3SGreg Kroah-Hartman if (len >= WQ_NAME_LEN) 4810bfb429f3SGreg Kroah-Hartman pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", wq->name); 481143a181f8SAudra Mitchell 4812d320c038STejun Heo max_active = max_active ?: WQ_DFL_ACTIVE; 4813b196be89STejun Heo max_active = wq_clamp_max_active(max_active, flags, wq->name); 48143af24433SOleg Nesterov 4815b196be89STejun Heo /* init wq */ 481697e37d7bSTejun Heo wq->flags = flags; 481782e098f5STejun Heo wq->max_active = max_active; 48186741dd3fSGreg Kroah-Hartman wq->saved_max_active = max_active; 48193c25a55dSLai Jiangshan mutex_init(&wq->mutex); 4820112202d9STejun Heo atomic_set(&wq->nr_pwqs_to_flush, 0); 482130cdf249STejun Heo INIT_LIST_HEAD(&wq->pwqs); 482273f53c4aSTejun Heo INIT_LIST_HEAD(&wq->flusher_queue); 482373f53c4aSTejun Heo INIT_LIST_HEAD(&wq->flusher_overflow); 4824493a1724STejun Heo INIT_LIST_HEAD(&wq->maydays); 48253af24433SOleg Nesterov 4826669de8bdSBart Van Assche wq_init_lockdep(wq); 4827cce1a165SOleg Nesterov INIT_LIST_HEAD(&wq->list); 48283af24433SOleg Nesterov 4829b522229aSTejun Heo if (alloc_and_link_pwqs(wq) < 0) 4830bfb429f3SGreg Kroah-Hartman goto err_unreg_lockdep; 48311537663fSTejun Heo 483240c17f75STejun Heo if (wq_online && init_rescuer(wq) < 0) 4833d2c1d404STejun Heo goto err_destroy; 4834e22bee78STejun Heo 4835226223abSTejun Heo if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 4836226223abSTejun Heo goto err_destroy; 4837226223abSTejun Heo 48386af8bf3dSOleg Nesterov /* 483968e13a67SLai Jiangshan * wq_pool_mutex protects global freeze state and workqueues list. 484068e13a67SLai Jiangshan * Grab it, adjust max_active and add the new @wq to workqueues 484168e13a67SLai Jiangshan * list. 
48426af8bf3dSOleg Nesterov */ 484368e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 4844a0a1a5fdSTejun Heo 4845a357fc03SLai Jiangshan mutex_lock(&wq->mutex); 484682e098f5STejun Heo wq_adjust_max_active(wq); 4847a357fc03SLai Jiangshan mutex_unlock(&wq->mutex); 4848a0a1a5fdSTejun Heo 4849e2dca7adSTejun Heo list_add_tail_rcu(&wq->list, &workqueues); 4850a0a1a5fdSTejun Heo 485168e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 48523af24433SOleg Nesterov 48533af24433SOleg Nesterov return wq; 4854d2c1d404STejun Heo 485582efcab3SBart Van Assche err_unreg_lockdep: 4856009bb421SBart Van Assche wq_unregister_lockdep(wq); 4857009bb421SBart Van Assche wq_free_lockdep(wq); 485882efcab3SBart Van Assche err_free_wq: 48596029a918STejun Heo free_workqueue_attrs(wq->unbound_attrs); 48604690c4abSTejun Heo kfree(wq); 4861d2c1d404STejun Heo return NULL; 4862d2c1d404STejun Heo err_destroy: 4863d2c1d404STejun Heo destroy_workqueue(wq); 48644690c4abSTejun Heo return NULL; 48651da177e4SLinus Torvalds } 4866669de8bdSBart Van Assche EXPORT_SYMBOL_GPL(alloc_workqueue); 48671da177e4SLinus Torvalds 4868c29eb853STejun Heo static bool pwq_busy(struct pool_workqueue *pwq) 4869c29eb853STejun Heo { 4870c29eb853STejun Heo int i; 4871c29eb853STejun Heo 4872c29eb853STejun Heo for (i = 0; i < WORK_NR_COLORS; i++) 4873c29eb853STejun Heo if (pwq->nr_in_flight[i]) 4874c29eb853STejun Heo return true; 4875c29eb853STejun Heo 4876bd31fb92STejun Heo if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1)) 4877c29eb853STejun Heo return true; 4878bad184d2STejun Heo if (!pwq_is_empty(pwq)) 4879c29eb853STejun Heo return true; 4880c29eb853STejun Heo 4881c29eb853STejun Heo return false; 4882c29eb853STejun Heo } 4883c29eb853STejun Heo 48843af24433SOleg Nesterov /** 48853af24433SOleg Nesterov * destroy_workqueue - safely terminate a workqueue 48863af24433SOleg Nesterov * @wq: target workqueue 48873af24433SOleg Nesterov * 48883af24433SOleg Nesterov * Safely destroy a workqueue. All work currently pending will be done first. 48893af24433SOleg Nesterov */ 48903af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq) 48913af24433SOleg Nesterov { 489249e3cf44STejun Heo struct pool_workqueue *pwq; 4893636b927eSTejun Heo int cpu; 48943af24433SOleg Nesterov 4895def98c84STejun Heo /* 4896def98c84STejun Heo * Remove it from sysfs first so that sanity check failure doesn't 4897def98c84STejun Heo * lead to sysfs name conflicts. 
4898def98c84STejun Heo */ 4899def98c84STejun Heo workqueue_sysfs_unregister(wq); 4900def98c84STejun Heo 490133e3f0a3SRichard Clark /* mark the workqueue destruction is in progress */ 490233e3f0a3SRichard Clark mutex_lock(&wq->mutex); 490333e3f0a3SRichard Clark wq->flags |= __WQ_DESTROYING; 490433e3f0a3SRichard Clark mutex_unlock(&wq->mutex); 490533e3f0a3SRichard Clark 49069c5a2ba7STejun Heo /* drain it before proceeding with destruction */ 49079c5a2ba7STejun Heo drain_workqueue(wq); 4908c8efcc25STejun Heo 4909def98c84STejun Heo /* kill rescuer, if sanity checks fail, leave it w/o rescuer */ 4910def98c84STejun Heo if (wq->rescuer) { 4911def98c84STejun Heo struct worker *rescuer = wq->rescuer; 4912def98c84STejun Heo 4913def98c84STejun Heo /* this prevents new queueing */ 4914a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&wq_mayday_lock); 4915def98c84STejun Heo wq->rescuer = NULL; 4916a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&wq_mayday_lock); 4917def98c84STejun Heo 4918def98c84STejun Heo /* rescuer will empty maydays list before exiting */ 4919def98c84STejun Heo kthread_stop(rescuer->task); 49208efe1223STejun Heo kfree(rescuer); 4921def98c84STejun Heo } 4922def98c84STejun Heo 4923c29eb853STejun Heo /* 4924c29eb853STejun Heo * Sanity checks - grab all the locks so that we wait for all 4925c29eb853STejun Heo * in-flight operations which may do put_pwq(). 4926c29eb853STejun Heo */ 4927c29eb853STejun Heo mutex_lock(&wq_pool_mutex); 4928b09f4fd3SLai Jiangshan mutex_lock(&wq->mutex); 492949e3cf44STejun Heo for_each_pwq(pwq, wq) { 4930a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pwq->pool->lock); 4931c29eb853STejun Heo if (WARN_ON(pwq_busy(pwq))) { 49321d9a6159SKefeng Wang pr_warn("%s: %s has the following busy pwq\n", 4933e66b39afSTejun Heo __func__, wq->name); 4934c29eb853STejun Heo show_pwq(pwq); 4935a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pwq->pool->lock); 4936b09f4fd3SLai Jiangshan mutex_unlock(&wq->mutex); 4937c29eb853STejun Heo mutex_unlock(&wq_pool_mutex); 493855df0933SImran Khan show_one_workqueue(wq); 49396183c009STejun Heo return; 494076af4d93STejun Heo } 4941a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pwq->pool->lock); 494276af4d93STejun Heo } 4943b09f4fd3SLai Jiangshan mutex_unlock(&wq->mutex); 49446183c009STejun Heo 4945a0a1a5fdSTejun Heo /* 4946a0a1a5fdSTejun Heo * wq list is used to freeze wq, remove from list after 4947a0a1a5fdSTejun Heo * flushing is complete in case freeze races us. 4948a0a1a5fdSTejun Heo */ 4949e2dca7adSTejun Heo list_del_rcu(&wq->list); 495068e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 49513af24433SOleg Nesterov 49528864b4e5STejun Heo /* 4953636b927eSTejun Heo * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq 4954636b927eSTejun Heo * to put the base refs. @wq will be auto-destroyed from the last 4955636b927eSTejun Heo * pwq_put. RCU read lock prevents @wq from going away from under us. 
49568864b4e5STejun Heo */ 4957636b927eSTejun Heo rcu_read_lock(); 4958636b927eSTejun Heo 4959636b927eSTejun Heo for_each_possible_cpu(cpu) { 4960bd31fb92STejun Heo put_pwq_unlocked(unbound_pwq(wq, cpu)); 4961bd31fb92STejun Heo RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL); 49624c16bd32STejun Heo } 49634c16bd32STejun Heo 4964bd31fb92STejun Heo put_pwq_unlocked(unbound_pwq(wq, -1)); 4965bd31fb92STejun Heo RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL); 4966636b927eSTejun Heo 4967636b927eSTejun Heo rcu_read_unlock(); 49683af24433SOleg Nesterov } 49693af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue); 49703af24433SOleg Nesterov 4971dcd989cbSTejun Heo /** 4972dcd989cbSTejun Heo * workqueue_set_max_active - adjust max_active of a workqueue 4973dcd989cbSTejun Heo * @wq: target workqueue 4974dcd989cbSTejun Heo * @max_active: new max_active value. 4975dcd989cbSTejun Heo * 49766741dd3fSGreg Kroah-Hartman * Set max_active of @wq to @max_active. 4977dcd989cbSTejun Heo * 4978dcd989cbSTejun Heo * CONTEXT: 4979dcd989cbSTejun Heo * Don't call from IRQ context. 4980dcd989cbSTejun Heo */ 4981dcd989cbSTejun Heo void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 4982dcd989cbSTejun Heo { 49838719dceaSTejun Heo /* disallow meddling with max_active for ordered workqueues */ 49840a94efb5STejun Heo if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 49858719dceaSTejun Heo return; 49868719dceaSTejun Heo 4987f3421797STejun Heo max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 4988dcd989cbSTejun Heo 4989a357fc03SLai Jiangshan mutex_lock(&wq->mutex); 4990dcd989cbSTejun Heo 49910a94efb5STejun Heo wq->flags &= ~__WQ_ORDERED; 4992dcd989cbSTejun Heo wq->saved_max_active = max_active; 499382e098f5STejun Heo wq_adjust_max_active(wq); 4994dcd989cbSTejun Heo 4995a357fc03SLai Jiangshan mutex_unlock(&wq->mutex); 4996dcd989cbSTejun Heo } 4997dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4998dcd989cbSTejun Heo 4999dcd989cbSTejun Heo /** 500027d4ee03SLukas Wunner * current_work - retrieve %current task's work struct 500127d4ee03SLukas Wunner * 500227d4ee03SLukas Wunner * Determine if %current task is a workqueue worker and what it's working on. 500327d4ee03SLukas Wunner * Useful to find out the context that the %current task is running in. 500427d4ee03SLukas Wunner * 500527d4ee03SLukas Wunner * Return: work struct if %current task is a workqueue worker, %NULL otherwise. 500627d4ee03SLukas Wunner */ 500727d4ee03SLukas Wunner struct work_struct *current_work(void) 500827d4ee03SLukas Wunner { 500927d4ee03SLukas Wunner struct worker *worker = current_wq_worker(); 501027d4ee03SLukas Wunner 501127d4ee03SLukas Wunner return worker ? worker->current_work : NULL; 501227d4ee03SLukas Wunner } 501327d4ee03SLukas Wunner EXPORT_SYMBOL(current_work); 501427d4ee03SLukas Wunner 501527d4ee03SLukas Wunner /** 5016e6267616STejun Heo * current_is_workqueue_rescuer - is %current workqueue rescuer? 5017e6267616STejun Heo * 5018e6267616STejun Heo * Determine whether %current is a workqueue rescuer. Can be used from 5019e6267616STejun Heo * work functions to determine whether it's being run off the rescuer task. 5020d185af30SYacine Belkadi * 5021d185af30SYacine Belkadi * Return: %true if %current is a workqueue rescuer. %false otherwise. 
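 *
 * Minimal usage sketch (hypothetical caller, not taken from this file). The
 * rescuer exists to guarantee forward progress during memory reclaim, so a
 * work function may want to avoid blocking on reclaim when run off it:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		gfp_t gfp = current_is_workqueue_rescuer() ? GFP_NOWAIT : GFP_KERNEL;
 *		void *buf = kmalloc(64, gfp);
 *
 *		if (buf)
 *			kfree(buf);
 *	}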
5022e6267616STejun Heo */ 5023e6267616STejun Heo bool current_is_workqueue_rescuer(void) 5024e6267616STejun Heo { 5025e6267616STejun Heo struct worker *worker = current_wq_worker(); 5026e6267616STejun Heo 50276a092dfdSLai Jiangshan return worker && worker->rescue_wq; 5028e6267616STejun Heo } 5029e6267616STejun Heo 5030e6267616STejun Heo /** 5031dcd989cbSTejun Heo * workqueue_congested - test whether a workqueue is congested 5032dcd989cbSTejun Heo * @cpu: CPU in question 5033dcd989cbSTejun Heo * @wq: target workqueue 5034dcd989cbSTejun Heo * 5035dcd989cbSTejun Heo * Test whether @wq's cpu workqueue for @cpu is congested. There is 5036dcd989cbSTejun Heo * no synchronization around this function and the test result is 5037dcd989cbSTejun Heo * unreliable and only useful as advisory hints or for debugging. 5038dcd989cbSTejun Heo * 5039d3251859STejun Heo * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 5040636b927eSTejun Heo * 5041636b927eSTejun Heo * With the exception of ordered workqueues, all workqueues have per-cpu 5042636b927eSTejun Heo * pool_workqueues, each with its own congested state. A workqueue being 5043636b927eSTejun Heo * congested on one CPU doesn't mean that the workqueue is congested on any 5044636b927eSTejun Heo * other CPUs. 5045d3251859STejun Heo * 5046d185af30SYacine Belkadi * Return: 5047dcd989cbSTejun Heo * %true if congested, %false otherwise. 5048dcd989cbSTejun Heo */ 5049d84ff051STejun Heo bool workqueue_congested(int cpu, struct workqueue_struct *wq) 5050dcd989cbSTejun Heo { 50517fb98ea7STejun Heo struct pool_workqueue *pwq; 505276af4d93STejun Heo bool ret; 505376af4d93STejun Heo 505424acfb71SThomas Gleixner rcu_read_lock(); 505524acfb71SThomas Gleixner preempt_disable(); 50567fb98ea7STejun Heo 5057d3251859STejun Heo if (cpu == WORK_CPU_UNBOUND) 5058d3251859STejun Heo cpu = smp_processor_id(); 5059d3251859STejun Heo 5060687a9aa5STejun Heo pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 5061f97a4a1aSLai Jiangshan ret = !list_empty(&pwq->inactive_works); 5062636b927eSTejun Heo 506324acfb71SThomas Gleixner preempt_enable(); 506424acfb71SThomas Gleixner rcu_read_unlock(); 506576af4d93STejun Heo 506676af4d93STejun Heo return ret; 5067dcd989cbSTejun Heo } 5068dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_congested); 5069dcd989cbSTejun Heo 5070dcd989cbSTejun Heo /** 5071dcd989cbSTejun Heo * work_busy - test whether a work is currently pending or running 5072dcd989cbSTejun Heo * @work: the work to be tested 5073dcd989cbSTejun Heo * 5074dcd989cbSTejun Heo * Test whether @work is currently pending or running. There is no 5075dcd989cbSTejun Heo * synchronization around this function and the test result is 5076dcd989cbSTejun Heo * unreliable and only useful as advisory hints or for debugging. 5077dcd989cbSTejun Heo * 5078d185af30SYacine Belkadi * Return: 5079dcd989cbSTejun Heo * OR'd bitmask of WORK_BUSY_* bits.
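 *
 * Debugging-oriented sketch (hypothetical @my_work, not from this file):
 *
 *	unsigned int busy = work_busy(&my_work);
 *
 *	if (busy & WORK_BUSY_PENDING)
 *		pr_debug("my_work is queued but has not started\n");
 *	if (busy & WORK_BUSY_RUNNING)
 *		pr_debug("my_work is currently executing\n");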
5080dcd989cbSTejun Heo */ 5081dcd989cbSTejun Heo unsigned int work_busy(struct work_struct *work) 5082dcd989cbSTejun Heo { 5083fa1b54e6STejun Heo struct worker_pool *pool; 5084dcd989cbSTejun Heo unsigned long flags; 5085dcd989cbSTejun Heo unsigned int ret = 0; 5086dcd989cbSTejun Heo 5087dcd989cbSTejun Heo if (work_pending(work)) 5088dcd989cbSTejun Heo ret |= WORK_BUSY_PENDING; 5089038366c5SLai Jiangshan 509024acfb71SThomas Gleixner rcu_read_lock(); 5091fa1b54e6STejun Heo pool = get_work_pool(work); 5092038366c5SLai Jiangshan if (pool) { 5093a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irqsave(&pool->lock, flags); 5094c9e7cf27STejun Heo if (find_worker_executing_work(pool, work)) 5095dcd989cbSTejun Heo ret |= WORK_BUSY_RUNNING; 5096a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irqrestore(&pool->lock, flags); 5097038366c5SLai Jiangshan } 509824acfb71SThomas Gleixner rcu_read_unlock(); 5099dcd989cbSTejun Heo 5100dcd989cbSTejun Heo return ret; 5101dcd989cbSTejun Heo } 5102dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_busy); 5103dcd989cbSTejun Heo 51043d1cb205STejun Heo /** 51053d1cb205STejun Heo * set_worker_desc - set description for the current work item 51063d1cb205STejun Heo * @fmt: printf-style format string 51073d1cb205STejun Heo * @...: arguments for the format string 51083d1cb205STejun Heo * 51093d1cb205STejun Heo * This function can be called by a running work function to describe what 51103d1cb205STejun Heo * the work item is about. If the worker task gets dumped, this 51113d1cb205STejun Heo * information will be printed out together to help debugging. The 51123d1cb205STejun Heo * description can be at most WORKER_DESC_LEN including the trailing '\0'. 51133d1cb205STejun Heo */ 51143d1cb205STejun Heo void set_worker_desc(const char *fmt, ...) 51153d1cb205STejun Heo { 51163d1cb205STejun Heo struct worker *worker = current_wq_worker(); 51173d1cb205STejun Heo va_list args; 51183d1cb205STejun Heo 51193d1cb205STejun Heo if (worker) { 51203d1cb205STejun Heo va_start(args, fmt); 51213d1cb205STejun Heo vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 51223d1cb205STejun Heo va_end(args); 51233d1cb205STejun Heo } 51243d1cb205STejun Heo } 51255c750d58SSteffen Maier EXPORT_SYMBOL_GPL(set_worker_desc); 51263d1cb205STejun Heo 51273d1cb205STejun Heo /** 51283d1cb205STejun Heo * print_worker_info - print out worker information and description 51293d1cb205STejun Heo * @log_lvl: the log level to use when printing 51303d1cb205STejun Heo * @task: target task 51313d1cb205STejun Heo * 51323d1cb205STejun Heo * If @task is a worker and currently executing a work item, print out the 51333d1cb205STejun Heo * name of the workqueue being serviced and worker description set with 51343d1cb205STejun Heo * set_worker_desc() by the currently executing work item. 51353d1cb205STejun Heo * 51363d1cb205STejun Heo * This function can be safely called on any task as long as the 51373d1cb205STejun Heo * task_struct itself is accessible. While safe, this function isn't 51383d1cb205STejun Heo * synchronized and may print out mixups or garbages of limited length. 
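 *
 * For example, a typical line as it tends to appear in crash dumps (the
 * concrete values here are hypothetical):
 *
 *	Workqueue: writeback wb_workfn (flush-8:0)
 *
 * where "writeback" is the workqueue name, wb_workfn the work function and
 * "flush-8:0" the description set via set_worker_desc().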
51393d1cb205STejun Heo */ 51403d1cb205STejun Heo void print_worker_info(const char *log_lvl, struct task_struct *task) 51413d1cb205STejun Heo { 51423d1cb205STejun Heo work_func_t *fn = NULL; 51433d1cb205STejun Heo char name[WQ_NAME_LEN] = { }; 51443d1cb205STejun Heo char desc[WORKER_DESC_LEN] = { }; 51453d1cb205STejun Heo struct pool_workqueue *pwq = NULL; 51463d1cb205STejun Heo struct workqueue_struct *wq = NULL; 51473d1cb205STejun Heo struct worker *worker; 51483d1cb205STejun Heo 51493d1cb205STejun Heo if (!(task->flags & PF_WQ_WORKER)) 51503d1cb205STejun Heo return; 51513d1cb205STejun Heo 51523d1cb205STejun Heo /* 51533d1cb205STejun Heo * This function is called without any synchronization and @task 51543d1cb205STejun Heo * could be in any state. Be careful with dereferences. 51553d1cb205STejun Heo */ 5156e700591aSPetr Mladek worker = kthread_probe_data(task); 51573d1cb205STejun Heo 51583d1cb205STejun Heo /* 51598bf89593STejun Heo * Carefully copy the associated workqueue's workfn, name and desc. 51608bf89593STejun Heo * Keep the original last '\0' in case the original is garbage. 51613d1cb205STejun Heo */ 5162fe557319SChristoph Hellwig copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); 5163fe557319SChristoph Hellwig copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); 5164fe557319SChristoph Hellwig copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); 5165fe557319SChristoph Hellwig copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); 5166fe557319SChristoph Hellwig copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); 51673d1cb205STejun Heo 51683d1cb205STejun Heo if (fn || name[0] || desc[0]) { 5169d75f773cSSakari Ailus printk("%sWorkqueue: %s %ps", log_lvl, name, fn); 51708bf89593STejun Heo if (strcmp(name, desc)) 51713d1cb205STejun Heo pr_cont(" (%s)", desc); 51723d1cb205STejun Heo pr_cont("\n"); 51733d1cb205STejun Heo } 51743d1cb205STejun Heo } 51753d1cb205STejun Heo 51763494fc30STejun Heo static void pr_cont_pool_info(struct worker_pool *pool) 51773494fc30STejun Heo { 51783494fc30STejun Heo pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 51793494fc30STejun Heo if (pool->node != NUMA_NO_NODE) 51803494fc30STejun Heo pr_cont(" node=%d", pool->node); 51813494fc30STejun Heo pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 51823494fc30STejun Heo } 51833494fc30STejun Heo 5184c76feb0dSPaul E. McKenney struct pr_cont_work_struct { 5185c76feb0dSPaul E. McKenney bool comma; 5186c76feb0dSPaul E. McKenney work_func_t func; 5187c76feb0dSPaul E. McKenney long ctr; 5188c76feb0dSPaul E. McKenney }; 5189c76feb0dSPaul E. McKenney 5190c76feb0dSPaul E. McKenney static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp) 5191c76feb0dSPaul E. McKenney { 5192c76feb0dSPaul E. McKenney if (!pcwsp->ctr) 5193c76feb0dSPaul E. McKenney goto out_record; 5194c76feb0dSPaul E. McKenney if (func == pcwsp->func) { 5195c76feb0dSPaul E. McKenney pcwsp->ctr++; 5196c76feb0dSPaul E. McKenney return; 5197c76feb0dSPaul E. McKenney } 5198c76feb0dSPaul E. McKenney if (pcwsp->ctr == 1) 5199c76feb0dSPaul E. McKenney pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func); 5200c76feb0dSPaul E. McKenney else 5201c76feb0dSPaul E. McKenney pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func); 5202c76feb0dSPaul E. McKenney pcwsp->ctr = 0; 5203c76feb0dSPaul E. McKenney out_record: 5204c76feb0dSPaul E. McKenney if ((long)func == -1L) 5205c76feb0dSPaul E. McKenney return; 5206c76feb0dSPaul E. 
McKenney pcwsp->comma = comma; 5207c76feb0dSPaul E. McKenney pcwsp->func = func; 5208c76feb0dSPaul E. McKenney pcwsp->ctr = 1; 5209c76feb0dSPaul E. McKenney } 5210c76feb0dSPaul E. McKenney 5211c76feb0dSPaul E. McKenney static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) 52123494fc30STejun Heo { 52133494fc30STejun Heo if (work->func == wq_barrier_func) { 52143494fc30STejun Heo struct wq_barrier *barr; 52153494fc30STejun Heo 52163494fc30STejun Heo barr = container_of(work, struct wq_barrier, work); 52173494fc30STejun Heo 5218c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 52193494fc30STejun Heo pr_cont("%s BAR(%d)", comma ? "," : "", 52203494fc30STejun Heo task_pid_nr(barr->task)); 52213494fc30STejun Heo } else { 5222c76feb0dSPaul E. McKenney if (!comma) 5223c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5224c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, work->func, pcwsp); 52253494fc30STejun Heo } 52263494fc30STejun Heo } 52273494fc30STejun Heo 52283494fc30STejun Heo static void show_pwq(struct pool_workqueue *pwq) 52293494fc30STejun Heo { 5230c76feb0dSPaul E. McKenney struct pr_cont_work_struct pcws = { .ctr = 0, }; 52313494fc30STejun Heo struct worker_pool *pool = pwq->pool; 52323494fc30STejun Heo struct work_struct *work; 52333494fc30STejun Heo struct worker *worker; 52343494fc30STejun Heo bool has_in_flight = false, has_pending = false; 52353494fc30STejun Heo int bkt; 52363494fc30STejun Heo 52373494fc30STejun Heo pr_info(" pwq %d:", pool->id); 52383494fc30STejun Heo pr_cont_pool_info(pool); 52393494fc30STejun Heo 524082e098f5STejun Heo pr_cont(" active=%d refcnt=%d%s\n", 524182e098f5STejun Heo pwq->nr_active, pwq->refcnt, 52423494fc30STejun Heo !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 52433494fc30STejun Heo 52443494fc30STejun Heo hash_for_each(pool->busy_hash, bkt, worker, hentry) { 52453494fc30STejun Heo if (worker->current_pwq == pwq) { 52463494fc30STejun Heo has_in_flight = true; 52473494fc30STejun Heo break; 52483494fc30STejun Heo } 52493494fc30STejun Heo } 52503494fc30STejun Heo if (has_in_flight) { 52513494fc30STejun Heo bool comma = false; 52523494fc30STejun Heo 52533494fc30STejun Heo pr_info(" in-flight:"); 52543494fc30STejun Heo hash_for_each(pool->busy_hash, bkt, worker, hentry) { 52553494fc30STejun Heo if (worker->current_pwq != pwq) 52563494fc30STejun Heo continue; 52573494fc30STejun Heo 5258d75f773cSSakari Ailus pr_cont("%s %d%s:%ps", comma ? "," : "", 52593494fc30STejun Heo task_pid_nr(worker->task), 526030ae2fc0STejun Heo worker->rescue_wq ? "(RESCUER)" : "", 52613494fc30STejun Heo worker->current_func); 52623494fc30STejun Heo list_for_each_entry(work, &worker->scheduled, entry) 5263c76feb0dSPaul E. McKenney pr_cont_work(false, work, &pcws); 5264c76feb0dSPaul E. 
McKenney pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 52653494fc30STejun Heo comma = true; 52663494fc30STejun Heo } 52673494fc30STejun Heo pr_cont("\n"); 52683494fc30STejun Heo } 52693494fc30STejun Heo 52703494fc30STejun Heo list_for_each_entry(work, &pool->worklist, entry) { 52713494fc30STejun Heo if (get_work_pwq(work) == pwq) { 52723494fc30STejun Heo has_pending = true; 52733494fc30STejun Heo break; 52743494fc30STejun Heo } 52753494fc30STejun Heo } 52763494fc30STejun Heo if (has_pending) { 52773494fc30STejun Heo bool comma = false; 52783494fc30STejun Heo 52793494fc30STejun Heo pr_info(" pending:"); 52803494fc30STejun Heo list_for_each_entry(work, &pool->worklist, entry) { 52813494fc30STejun Heo if (get_work_pwq(work) != pwq) 52823494fc30STejun Heo continue; 52833494fc30STejun Heo 5284c76feb0dSPaul E. McKenney pr_cont_work(comma, work, &pcws); 52853494fc30STejun Heo comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 52863494fc30STejun Heo } 5287c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 52883494fc30STejun Heo pr_cont("\n"); 52893494fc30STejun Heo } 52903494fc30STejun Heo 5291f97a4a1aSLai Jiangshan if (!list_empty(&pwq->inactive_works)) { 52923494fc30STejun Heo bool comma = false; 52933494fc30STejun Heo 5294f97a4a1aSLai Jiangshan pr_info(" inactive:"); 5295f97a4a1aSLai Jiangshan list_for_each_entry(work, &pwq->inactive_works, entry) { 5296c76feb0dSPaul E. McKenney pr_cont_work(comma, work, &pcws); 52973494fc30STejun Heo comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 52983494fc30STejun Heo } 5299c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 53003494fc30STejun Heo pr_cont("\n"); 53013494fc30STejun Heo } 53023494fc30STejun Heo } 53033494fc30STejun Heo 53043494fc30STejun Heo /** 530555df0933SImran Khan * show_one_workqueue - dump state of specified workqueue 530655df0933SImran Khan * @wq: workqueue whose state will be printed 53073494fc30STejun Heo */ 530855df0933SImran Khan void show_one_workqueue(struct workqueue_struct *wq) 53093494fc30STejun Heo { 53103494fc30STejun Heo struct pool_workqueue *pwq; 53113494fc30STejun Heo bool idle = true; 531255df0933SImran Khan unsigned long flags; 53133494fc30STejun Heo 53143494fc30STejun Heo for_each_pwq(pwq, wq) { 5315bad184d2STejun Heo if (!pwq_is_empty(pwq)) { 53163494fc30STejun Heo idle = false; 53173494fc30STejun Heo break; 53183494fc30STejun Heo } 53193494fc30STejun Heo } 532055df0933SImran Khan if (idle) /* Nothing to print for idle workqueue */ 532155df0933SImran Khan return; 53223494fc30STejun Heo 53233494fc30STejun Heo pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 53243494fc30STejun Heo 53253494fc30STejun Heo for_each_pwq(pwq, wq) { 5326a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irqsave(&pwq->pool->lock, flags); 5327bad184d2STejun Heo if (!pwq_is_empty(pwq)) { 532857116ce1SJohan Hovold /* 532957116ce1SJohan Hovold * Defer printing to avoid deadlocks in console 533057116ce1SJohan Hovold * drivers that queue work while holding locks 533157116ce1SJohan Hovold * also taken in their write paths. 533257116ce1SJohan Hovold */ 533357116ce1SJohan Hovold printk_deferred_enter(); 53343494fc30STejun Heo show_pwq(pwq); 533557116ce1SJohan Hovold printk_deferred_exit(); 533657116ce1SJohan Hovold } 5337a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 533862635ea8SSergey Senozhatsky /* 533962635ea8SSergey Senozhatsky * We could be printing a lot from atomic context, e.g. 
534055df0933SImran Khan * sysrq-t -> show_all_workqueues(). Avoid triggering 534162635ea8SSergey Senozhatsky * hard lockup. 534262635ea8SSergey Senozhatsky */ 534362635ea8SSergey Senozhatsky touch_nmi_watchdog(); 53443494fc30STejun Heo } 534555df0933SImran Khan 53463494fc30STejun Heo } 53473494fc30STejun Heo 534855df0933SImran Khan /** 534955df0933SImran Khan * show_one_worker_pool - dump state of specified worker pool 535055df0933SImran Khan * @pool: worker pool whose state will be printed 535155df0933SImran Khan */ 535255df0933SImran Khan static void show_one_worker_pool(struct worker_pool *pool) 535355df0933SImran Khan { 53543494fc30STejun Heo struct worker *worker; 53553494fc30STejun Heo bool first = true; 535655df0933SImran Khan unsigned long flags; 5357335a42ebSPetr Mladek unsigned long hung = 0; 53583494fc30STejun Heo 5359a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irqsave(&pool->lock, flags); 53603494fc30STejun Heo if (pool->nr_workers == pool->nr_idle) 53613494fc30STejun Heo goto next_pool; 5362335a42ebSPetr Mladek 5363335a42ebSPetr Mladek /* How long the first pending work is waiting for a worker. */ 5364335a42ebSPetr Mladek if (!list_empty(&pool->worklist)) 5365335a42ebSPetr Mladek hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 5366335a42ebSPetr Mladek 536757116ce1SJohan Hovold /* 536857116ce1SJohan Hovold * Defer printing to avoid deadlocks in console drivers that 536957116ce1SJohan Hovold * queue work while holding locks also taken in their write 537057116ce1SJohan Hovold * paths. 537157116ce1SJohan Hovold */ 537257116ce1SJohan Hovold printk_deferred_enter(); 53733494fc30STejun Heo pr_info("pool %d:", pool->id); 53743494fc30STejun Heo pr_cont_pool_info(pool); 5375335a42ebSPetr Mladek pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); 53763494fc30STejun Heo if (pool->manager) 53773494fc30STejun Heo pr_cont(" manager: %d", 53783494fc30STejun Heo task_pid_nr(pool->manager->task)); 53793494fc30STejun Heo list_for_each_entry(worker, &pool->idle_list, entry) { 53803494fc30STejun Heo pr_cont(" %s%d", first ? "idle: " : "", 53813494fc30STejun Heo task_pid_nr(worker->task)); 53823494fc30STejun Heo first = false; 53833494fc30STejun Heo } 53843494fc30STejun Heo pr_cont("\n"); 538557116ce1SJohan Hovold printk_deferred_exit(); 53863494fc30STejun Heo next_pool: 5387a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irqrestore(&pool->lock, flags); 538862635ea8SSergey Senozhatsky /* 538962635ea8SSergey Senozhatsky * We could be printing a lot from atomic context, e.g. 539055df0933SImran Khan * sysrq-t -> show_all_workqueues(). Avoid triggering 539162635ea8SSergey Senozhatsky * hard lockup. 539262635ea8SSergey Senozhatsky */ 539362635ea8SSergey Senozhatsky touch_nmi_watchdog(); 539455df0933SImran Khan 53953494fc30STejun Heo } 53963494fc30STejun Heo 539755df0933SImran Khan /** 539855df0933SImran Khan * show_all_workqueues - dump workqueue state 539955df0933SImran Khan * 5400704bc669SJungseung Lee * Called from a sysrq handler and prints out all busy workqueues and pools. 
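 *
 * For instance (going by the sysrq-t references elsewhere in this file),
 * "echo t > /proc/sysrq-trigger" is one way to end up in this dump.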
540155df0933SImran Khan */ 540255df0933SImran Khan void show_all_workqueues(void) 540355df0933SImran Khan { 540455df0933SImran Khan struct workqueue_struct *wq; 540555df0933SImran Khan struct worker_pool *pool; 540655df0933SImran Khan int pi; 540755df0933SImran Khan 540855df0933SImran Khan rcu_read_lock(); 540955df0933SImran Khan 541055df0933SImran Khan pr_info("Showing busy workqueues and worker pools:\n"); 541155df0933SImran Khan 541255df0933SImran Khan list_for_each_entry_rcu(wq, &workqueues, list) 541355df0933SImran Khan show_one_workqueue(wq); 541455df0933SImran Khan 541555df0933SImran Khan for_each_pool(pool, pi) 541655df0933SImran Khan show_one_worker_pool(pool); 541755df0933SImran Khan 541824acfb71SThomas Gleixner rcu_read_unlock(); 54193494fc30STejun Heo } 54203494fc30STejun Heo 5421704bc669SJungseung Lee /** 5422704bc669SJungseung Lee * show_freezable_workqueues - dump freezable workqueue state 5423704bc669SJungseung Lee * 5424704bc669SJungseung Lee * Called from try_to_freeze_tasks() and prints out all freezable workqueues 5425704bc669SJungseung Lee * still busy. 5426704bc669SJungseung Lee */ 5427704bc669SJungseung Lee void show_freezable_workqueues(void) 5428704bc669SJungseung Lee { 5429704bc669SJungseung Lee struct workqueue_struct *wq; 5430704bc669SJungseung Lee 5431704bc669SJungseung Lee rcu_read_lock(); 5432704bc669SJungseung Lee 5433704bc669SJungseung Lee pr_info("Showing freezable workqueues that are still busy:\n"); 5434704bc669SJungseung Lee 5435704bc669SJungseung Lee list_for_each_entry_rcu(wq, &workqueues, list) { 5436704bc669SJungseung Lee if (!(wq->flags & WQ_FREEZABLE)) 5437704bc669SJungseung Lee continue; 5438704bc669SJungseung Lee show_one_workqueue(wq); 5439704bc669SJungseung Lee } 5440704bc669SJungseung Lee 5441704bc669SJungseung Lee rcu_read_unlock(); 5442704bc669SJungseung Lee } 5443704bc669SJungseung Lee 54446b59808bSTejun Heo /* used to show worker information through /proc/PID/{comm,stat,status} */ 54456b59808bSTejun Heo void wq_worker_comm(char *buf, size_t size, struct task_struct *task) 54466b59808bSTejun Heo { 54476b59808bSTejun Heo int off; 54486b59808bSTejun Heo 54496b59808bSTejun Heo /* always show the actual comm */ 54506b59808bSTejun Heo off = strscpy(buf, task->comm, size); 54516b59808bSTejun Heo if (off < 0) 54526b59808bSTejun Heo return; 54536b59808bSTejun Heo 5454197f6accSTejun Heo /* stabilize PF_WQ_WORKER and worker pool association */ 54556b59808bSTejun Heo mutex_lock(&wq_pool_attach_mutex); 54566b59808bSTejun Heo 5457197f6accSTejun Heo if (task->flags & PF_WQ_WORKER) { 5458197f6accSTejun Heo struct worker *worker = kthread_data(task); 5459197f6accSTejun Heo struct worker_pool *pool = worker->pool; 54606b59808bSTejun Heo 54616b59808bSTejun Heo if (pool) { 5462a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 54636b59808bSTejun Heo /* 5464197f6accSTejun Heo * ->desc tracks information (wq name or 5465197f6accSTejun Heo * set_worker_desc()) for the latest execution. If 5466197f6accSTejun Heo * current, prepend '+', otherwise '-'. 
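 *
 * e.g. (hypothetical values) a kworker currently executing a writeback
 * item may read "kworker/u16:3+flush-8:0" in /proc/PID/comm, while
 * "kworker/u16:3-flush-8:0" means that was merely its most recent work
 * item.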
54676b59808bSTejun Heo */ 54686b59808bSTejun Heo if (worker->desc[0] != '\0') { 54696b59808bSTejun Heo if (worker->current_work) 54706b59808bSTejun Heo scnprintf(buf + off, size - off, "+%s", 54716b59808bSTejun Heo worker->desc); 54726b59808bSTejun Heo else 54736b59808bSTejun Heo scnprintf(buf + off, size - off, "-%s", 54746b59808bSTejun Heo worker->desc); 54756b59808bSTejun Heo } 5476a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 54776b59808bSTejun Heo } 5478197f6accSTejun Heo } 54796b59808bSTejun Heo 54806b59808bSTejun Heo mutex_unlock(&wq_pool_attach_mutex); 54816b59808bSTejun Heo } 54826b59808bSTejun Heo 548366448bc2SMathieu Malaterre #ifdef CONFIG_SMP 548466448bc2SMathieu Malaterre 5485db7bccf4STejun Heo /* 5486db7bccf4STejun Heo * CPU hotplug. 5487db7bccf4STejun Heo * 5488e22bee78STejun Heo * There are two challenges in supporting CPU hotplug. Firstly, there 5489112202d9STejun Heo * are a lot of assumptions on strong associations among work, pwq and 5490706026c2STejun Heo * pool which make migrating pending and scheduled works very 5491e22bee78STejun Heo * difficult to implement without impacting hot paths. Secondly, 549294cf58bbSTejun Heo * worker pools serve a mix of short, long and very long running works, making 5493e22bee78STejun Heo * blocked draining impractical. 5494e22bee78STejun Heo * 549524647570STejun Heo * This is solved by allowing the pools to be disassociated from the CPU 5496628c78e7STejun Heo * running as unbound ones and allowing them to be reattached later if the 5497628c78e7STejun Heo * cpu comes back online. 5498db7bccf4STejun Heo */ 5499db7bccf4STejun Heo 5500e8b3f8dbSLai Jiangshan static void unbind_workers(int cpu) 5501db7bccf4STejun Heo { 55024ce62e9eSTejun Heo struct worker_pool *pool; 5503db7bccf4STejun Heo struct worker *worker; 5504db7bccf4STejun Heo 5505f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 55061258fae7STejun Heo mutex_lock(&wq_pool_attach_mutex); 5507a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 5508e22bee78STejun Heo 5509f2d5a0eeSTejun Heo /* 551092f9c5c4SLai Jiangshan * We've blocked all attach/detach operations. Make all workers 551194cf58bbSTejun Heo * unbound and set DISASSOCIATED. Before this, all workers 551211b45b0bSLai Jiangshan * must be on the cpu. After this, they may become diasporas. 5513b4ac9384SLai Jiangshan * And the preemption disabled section in their sched callbacks 5514b4ac9384SLai Jiangshan * is guaranteed to see WORKER_UNBOUND since the code here 5515b4ac9384SLai Jiangshan * is on the same cpu. 5516f2d5a0eeSTejun Heo */ 5517da028469SLai Jiangshan for_each_pool_worker(worker, pool) 5518403c821dSTejun Heo worker->flags |= WORKER_UNBOUND; 5519db7bccf4STejun Heo 552024647570STejun Heo pool->flags |= POOL_DISASSOCIATED; 5521f2d5a0eeSTejun Heo 5522e22bee78STejun Heo /* 5523989442d7SLai Jiangshan * The handling of nr_running in sched callbacks is disabled 5524989442d7SLai Jiangshan * now. Zap nr_running. After this, nr_running stays zero and 5525989442d7SLai Jiangshan * need_more_worker() and keep_working() are always true as 5526989442d7SLai Jiangshan * long as the worklist is not empty. This pool now behaves as 5527989442d7SLai Jiangshan * an unbound (in terms of concurrency management) pool which 5528eb283428SLai Jiangshan * is served by workers tied to the pool.
5529e22bee78STejun Heo */ 5530bc35f7efSLai Jiangshan pool->nr_running = 0; 5531eb283428SLai Jiangshan 5532eb283428SLai Jiangshan /* 5533eb283428SLai Jiangshan * With concurrency management just turned off, a busy 5534eb283428SLai Jiangshan * worker blocking could lead to lengthy stalls. Kick off 5535eb283428SLai Jiangshan * unbound chain execution of currently pending work items. 5536eb283428SLai Jiangshan */ 55370219a352STejun Heo kick_pool(pool); 5538989442d7SLai Jiangshan 5539a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 5540989442d7SLai Jiangshan 5541793777bcSValentin Schneider for_each_pool_worker(worker, pool) 5542793777bcSValentin Schneider unbind_worker(worker); 5543989442d7SLai Jiangshan 5544989442d7SLai Jiangshan mutex_unlock(&wq_pool_attach_mutex); 5545eb283428SLai Jiangshan } 5546db7bccf4STejun Heo } 5547db7bccf4STejun Heo 5548bd7c089eSTejun Heo /** 5549bd7c089eSTejun Heo * rebind_workers - rebind all workers of a pool to the associated CPU 5550bd7c089eSTejun Heo * @pool: pool of interest 5551bd7c089eSTejun Heo * 5552a9ab775bSTejun Heo * @pool->cpu is coming online. Rebind all workers to the CPU. 5553bd7c089eSTejun Heo */ 5554bd7c089eSTejun Heo static void rebind_workers(struct worker_pool *pool) 5555bd7c089eSTejun Heo { 5556a9ab775bSTejun Heo struct worker *worker; 5557bd7c089eSTejun Heo 55581258fae7STejun Heo lockdep_assert_held(&wq_pool_attach_mutex); 5559bd7c089eSTejun Heo 5560bd7c089eSTejun Heo /* 5561a9ab775bSTejun Heo * Restore CPU affinity of all workers. As all idle workers should 5562a9ab775bSTejun Heo * be on the run-queue of the associated CPU before any local 5563402dd89dSShailendra Verma * wake-ups for concurrency management happen, restore CPU affinity 5564a9ab775bSTejun Heo * of all workers first and then clear UNBOUND. As we're called 5565a9ab775bSTejun Heo * from CPU_ONLINE, the following shouldn't fail. 5566bd7c089eSTejun Heo */ 5567c63a2e52SValentin Schneider for_each_pool_worker(worker, pool) { 5568c63a2e52SValentin Schneider kthread_set_per_cpu(worker->task, pool->cpu); 5569c63a2e52SValentin Schneider WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 55709546b29eSTejun Heo pool_allowed_cpus(pool)) < 0); 5571c63a2e52SValentin Schneider } 5572a9ab775bSTejun Heo 5573a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 5574f7c17d26SWanpeng Li 55753de5e884SLai Jiangshan pool->flags &= ~POOL_DISASSOCIATED; 5576a9ab775bSTejun Heo 5577da028469SLai Jiangshan for_each_pool_worker(worker, pool) { 5578a9ab775bSTejun Heo unsigned int worker_flags = worker->flags; 5579a9ab775bSTejun Heo 5580a9ab775bSTejun Heo /* 5581a9ab775bSTejun Heo * We want to clear UNBOUND but can't directly call 5582a9ab775bSTejun Heo * worker_clr_flags() or adjust nr_running. Atomically 5583a9ab775bSTejun Heo * replace UNBOUND with another NOT_RUNNING flag REBOUND. 5584a9ab775bSTejun Heo * @worker will clear REBOUND using worker_clr_flags() when 5585a9ab775bSTejun Heo * it initiates the next execution cycle thus restoring 5586a9ab775bSTejun Heo * concurrency management. Note that when or whether 5587a9ab775bSTejun Heo * @worker clears REBOUND doesn't affect correctness. 5588a9ab775bSTejun Heo * 5589c95491edSMark Rutland * WRITE_ONCE() is necessary because @worker->flags may be 5590a9ab775bSTejun Heo * tested without holding any lock in 55916d25be57SThomas Gleixner * wq_worker_running(). Without it, NOT_RUNNING test may 5592a9ab775bSTejun Heo * fail incorrectly leading to premature concurrency 5593a9ab775bSTejun Heo * management operations. 
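 *
 * Purely illustrative transition: a worker carrying
 * WORKER_UNBOUND | WORKER_IDLE leaves the update below with
 * WORKER_REBOUND | WORKER_IDLE, so the NOT_RUNNING test stays true
 * across the switch and nr_running is left untouched until the
 * worker itself drops REBOUND.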
5594bd7c089eSTejun Heo */ 5595a9ab775bSTejun Heo WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 5596a9ab775bSTejun Heo worker_flags |= WORKER_REBOUND; 5597a9ab775bSTejun Heo worker_flags &= ~WORKER_UNBOUND; 5598c95491edSMark Rutland WRITE_ONCE(worker->flags, worker_flags); 5599bd7c089eSTejun Heo } 5600a9ab775bSTejun Heo 5601a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 5602bd7c089eSTejun Heo } 5603bd7c089eSTejun Heo 56047dbc725eSTejun Heo /** 56057dbc725eSTejun Heo * restore_unbound_workers_cpumask - restore cpumask of unbound workers 56067dbc725eSTejun Heo * @pool: unbound pool of interest 56077dbc725eSTejun Heo * @cpu: the CPU which is coming up 56087dbc725eSTejun Heo * 56097dbc725eSTejun Heo * An unbound pool may end up with a cpumask which doesn't have any online 56107dbc725eSTejun Heo * CPUs. When a worker of such pool get scheduled, the scheduler resets 56117dbc725eSTejun Heo * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 56127dbc725eSTejun Heo * online CPU before, cpus_allowed of all its workers should be restored. 56137dbc725eSTejun Heo */ 56147dbc725eSTejun Heo static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 56157dbc725eSTejun Heo { 56167dbc725eSTejun Heo static cpumask_t cpumask; 56177dbc725eSTejun Heo struct worker *worker; 56187dbc725eSTejun Heo 56191258fae7STejun Heo lockdep_assert_held(&wq_pool_attach_mutex); 56207dbc725eSTejun Heo 56217dbc725eSTejun Heo /* is @cpu allowed for @pool? */ 56227dbc725eSTejun Heo if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 56237dbc725eSTejun Heo return; 56247dbc725eSTejun Heo 56257dbc725eSTejun Heo cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 56267dbc725eSTejun Heo 56277dbc725eSTejun Heo /* as we're called from CPU_ONLINE, the following shouldn't fail */ 5628da028469SLai Jiangshan for_each_pool_worker(worker, pool) 5629d945b5e9SPeter Zijlstra WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 56307dbc725eSTejun Heo } 56317dbc725eSTejun Heo 56327ee681b2SThomas Gleixner int workqueue_prepare_cpu(unsigned int cpu) 56331da177e4SLinus Torvalds { 56344ce62e9eSTejun Heo struct worker_pool *pool; 56351da177e4SLinus Torvalds 5636f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 56373ce63377STejun Heo if (pool->nr_workers) 56383ce63377STejun Heo continue; 5639051e1850SLai Jiangshan if (!create_worker(pool)) 56407ee681b2SThomas Gleixner return -ENOMEM; 56413af24433SOleg Nesterov } 56427ee681b2SThomas Gleixner return 0; 56437ee681b2SThomas Gleixner } 56441da177e4SLinus Torvalds 56457ee681b2SThomas Gleixner int workqueue_online_cpu(unsigned int cpu) 56467ee681b2SThomas Gleixner { 56477ee681b2SThomas Gleixner struct worker_pool *pool; 56487ee681b2SThomas Gleixner struct workqueue_struct *wq; 56497ee681b2SThomas Gleixner int pi; 56507ee681b2SThomas Gleixner 565168e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 56527dbc725eSTejun Heo 56537dbc725eSTejun Heo for_each_pool(pool, pi) { 56541258fae7STejun Heo mutex_lock(&wq_pool_attach_mutex); 565594cf58bbSTejun Heo 5656f05b558dSLai Jiangshan if (pool->cpu == cpu) 565794cf58bbSTejun Heo rebind_workers(pool); 5658f05b558dSLai Jiangshan else if (pool->cpu < 0) 56597dbc725eSTejun Heo restore_unbound_workers_cpumask(pool, cpu); 566094cf58bbSTejun Heo 56611258fae7STejun Heo mutex_unlock(&wq_pool_attach_mutex); 566294cf58bbSTejun Heo } 56637dbc725eSTejun Heo 5664fef59c9cSTejun Heo /* update pod affinity of unbound workqueues */ 56654cbfd3deSTejun Heo list_for_each_entry(wq, &workqueues, list) { 
566684193c07STejun Heo struct workqueue_attrs *attrs = wq->unbound_attrs; 566784193c07STejun Heo 566884193c07STejun Heo if (attrs) { 566984193c07STejun Heo const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 56704cbfd3deSTejun Heo int tcpu; 56714cbfd3deSTejun Heo 567284193c07STejun Heo for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5673fef59c9cSTejun Heo wq_update_pod(wq, tcpu, cpu, true); 56744cbfd3deSTejun Heo } 56754cbfd3deSTejun Heo } 56764c16bd32STejun Heo 567768e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 56787ee681b2SThomas Gleixner return 0; 567965758202STejun Heo } 568065758202STejun Heo 56817ee681b2SThomas Gleixner int workqueue_offline_cpu(unsigned int cpu) 568265758202STejun Heo { 56834c16bd32STejun Heo struct workqueue_struct *wq; 56848db25e78STejun Heo 56854c16bd32STejun Heo /* unbinding per-cpu workers should happen on the local CPU */ 5686e8b3f8dbSLai Jiangshan if (WARN_ON(cpu != smp_processor_id())) 5687e8b3f8dbSLai Jiangshan return -1; 5688e8b3f8dbSLai Jiangshan 5689e8b3f8dbSLai Jiangshan unbind_workers(cpu); 56904c16bd32STejun Heo 5691fef59c9cSTejun Heo /* update pod affinity of unbound workqueues */ 56924c16bd32STejun Heo mutex_lock(&wq_pool_mutex); 56934cbfd3deSTejun Heo list_for_each_entry(wq, &workqueues, list) { 569484193c07STejun Heo struct workqueue_attrs *attrs = wq->unbound_attrs; 569584193c07STejun Heo 569684193c07STejun Heo if (attrs) { 569784193c07STejun Heo const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 56984cbfd3deSTejun Heo int tcpu; 56994cbfd3deSTejun Heo 570084193c07STejun Heo for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5701fef59c9cSTejun Heo wq_update_pod(wq, tcpu, cpu, false); 57024cbfd3deSTejun Heo } 57034cbfd3deSTejun Heo } 57044c16bd32STejun Heo mutex_unlock(&wq_pool_mutex); 57054c16bd32STejun Heo 57067ee681b2SThomas Gleixner return 0; 570765758202STejun Heo } 570865758202STejun Heo 57092d3854a3SRusty Russell struct work_for_cpu { 5710ed48ece2STejun Heo struct work_struct work; 57112d3854a3SRusty Russell long (*fn)(void *); 57122d3854a3SRusty Russell void *arg; 57132d3854a3SRusty Russell long ret; 57142d3854a3SRusty Russell }; 57152d3854a3SRusty Russell 5716ed48ece2STejun Heo static void work_for_cpu_fn(struct work_struct *work) 57172d3854a3SRusty Russell { 5718ed48ece2STejun Heo struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 5719ed48ece2STejun Heo 57202d3854a3SRusty Russell wfc->ret = wfc->fn(wfc->arg); 57212d3854a3SRusty Russell } 57222d3854a3SRusty Russell 57232d3854a3SRusty Russell /** 5724be2355b7SFrederic Weisbecker * work_on_cpu_key - run a function in thread context on a particular cpu 57252d3854a3SRusty Russell * @cpu: the cpu to run on 57262d3854a3SRusty Russell * @fn: the function to run 57272d3854a3SRusty Russell * @arg: the function arg 5728be2355b7SFrederic Weisbecker * @key: The lock class key for lock debugging purposes 57292d3854a3SRusty Russell * 573031ad9081SRusty Russell * It is up to the caller to ensure that the cpu doesn't go offline. 57316b44003eSAndrew Morton * The caller must not hold any locks which would prevent @fn from completing. 5732d185af30SYacine Belkadi * 5733d185af30SYacine Belkadi * Return: The value @fn returns. 
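 *
 * Minimal usage sketch (caller-side names are hypothetical); my_fn()
 * runs in a kworker bound to the requested CPU:
 *
 *	static long my_fn(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	static struct lock_class_key my_key;
 *	long ret = work_on_cpu_key(1, my_fn, NULL, &my_key);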
57342d3854a3SRusty Russell */ 5735be2355b7SFrederic Weisbecker long work_on_cpu_key(int cpu, long (*fn)(void *), 5736be2355b7SFrederic Weisbecker void *arg, struct lock_class_key *key) 57372d3854a3SRusty Russell { 5738ed48ece2STejun Heo struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 57392d3854a3SRusty Russell 5740be2355b7SFrederic Weisbecker INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key); 5741ed48ece2STejun Heo schedule_work_on(cpu, &wfc.work); 574212997d1aSBjorn Helgaas flush_work(&wfc.work); 5743440a1136SChuansheng Liu destroy_work_on_stack(&wfc.work); 57442d3854a3SRusty Russell return wfc.ret; 57452d3854a3SRusty Russell } 5746be2355b7SFrederic Weisbecker EXPORT_SYMBOL_GPL(work_on_cpu_key); 57470e8d6a93SThomas Gleixner 57480e8d6a93SThomas Gleixner /** 5749be2355b7SFrederic Weisbecker * work_on_cpu_safe_key - run a function in thread context on a particular cpu 57500e8d6a93SThomas Gleixner * @cpu: the cpu to run on 57510e8d6a93SThomas Gleixner * @fn: the function to run 57520e8d6a93SThomas Gleixner * @arg: the function argument 5753be2355b7SFrederic Weisbecker * @key: The lock class key for lock debugging purposes 57540e8d6a93SThomas Gleixner * 57550e8d6a93SThomas Gleixner * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold 57560e8d6a93SThomas Gleixner * any locks which would prevent @fn from completing. 57570e8d6a93SThomas Gleixner * 57580e8d6a93SThomas Gleixner * Return: The value @fn returns. 57590e8d6a93SThomas Gleixner */ 5760be2355b7SFrederic Weisbecker long work_on_cpu_safe_key(int cpu, long (*fn)(void *), 5761be2355b7SFrederic Weisbecker void *arg, struct lock_class_key *key) 57620e8d6a93SThomas Gleixner { 57630e8d6a93SThomas Gleixner long ret = -ENODEV; 57640e8d6a93SThomas Gleixner 5765ffd8bea8SSebastian Andrzej Siewior cpus_read_lock(); 57660e8d6a93SThomas Gleixner if (cpu_online(cpu)) 5767be2355b7SFrederic Weisbecker ret = work_on_cpu_key(cpu, fn, arg, key); 5768ffd8bea8SSebastian Andrzej Siewior cpus_read_unlock(); 57690e8d6a93SThomas Gleixner return ret; 57700e8d6a93SThomas Gleixner } 5771be2355b7SFrederic Weisbecker EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); 57722d3854a3SRusty Russell #endif /* CONFIG_SMP */ 57732d3854a3SRusty Russell 5774a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER 5775e7577c50SRusty Russell 5776a0a1a5fdSTejun Heo /** 5777a0a1a5fdSTejun Heo * freeze_workqueues_begin - begin freezing workqueues 5778a0a1a5fdSTejun Heo * 577958a69cb4STejun Heo * Start freezing workqueues. After this function returns, all freezable 5780f97a4a1aSLai Jiangshan * workqueues will queue new works to their inactive_works list instead of 5781706026c2STejun Heo * pool->worklist. 5782a0a1a5fdSTejun Heo * 5783a0a1a5fdSTejun Heo * CONTEXT: 5784a357fc03SLai Jiangshan * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
5785a0a1a5fdSTejun Heo */ 5786a0a1a5fdSTejun Heo void freeze_workqueues_begin(void) 5787a0a1a5fdSTejun Heo { 578824b8a847STejun Heo struct workqueue_struct *wq; 5789a0a1a5fdSTejun Heo 579068e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 5791a0a1a5fdSTejun Heo 57926183c009STejun Heo WARN_ON_ONCE(workqueue_freezing); 5793a0a1a5fdSTejun Heo workqueue_freezing = true; 5794a0a1a5fdSTejun Heo 579524b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 5796a357fc03SLai Jiangshan mutex_lock(&wq->mutex); 579782e098f5STejun Heo wq_adjust_max_active(wq); 5798a357fc03SLai Jiangshan mutex_unlock(&wq->mutex); 5799a1056305STejun Heo } 58005bcab335STejun Heo 580168e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 5802a0a1a5fdSTejun Heo } 5803a0a1a5fdSTejun Heo 5804a0a1a5fdSTejun Heo /** 580558a69cb4STejun Heo * freeze_workqueues_busy - are freezable workqueues still busy? 5806a0a1a5fdSTejun Heo * 5807a0a1a5fdSTejun Heo * Check whether freezing is complete. This function must be called 5808a0a1a5fdSTejun Heo * between freeze_workqueues_begin() and thaw_workqueues(). 5809a0a1a5fdSTejun Heo * 5810a0a1a5fdSTejun Heo * CONTEXT: 581168e13a67SLai Jiangshan * Grabs and releases wq_pool_mutex. 5812a0a1a5fdSTejun Heo * 5813d185af30SYacine Belkadi * Return: 581458a69cb4STejun Heo * %true if some freezable workqueues are still busy. %false if freezing 581558a69cb4STejun Heo * is complete. 5816a0a1a5fdSTejun Heo */ 5817a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void) 5818a0a1a5fdSTejun Heo { 5819a0a1a5fdSTejun Heo bool busy = false; 582024b8a847STejun Heo struct workqueue_struct *wq; 582124b8a847STejun Heo struct pool_workqueue *pwq; 5822a0a1a5fdSTejun Heo 582368e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 5824a0a1a5fdSTejun Heo 58256183c009STejun Heo WARN_ON_ONCE(!workqueue_freezing); 5826a0a1a5fdSTejun Heo 582724b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 582824b8a847STejun Heo if (!(wq->flags & WQ_FREEZABLE)) 582924b8a847STejun Heo continue; 5830a0a1a5fdSTejun Heo /* 5831a0a1a5fdSTejun Heo * nr_active is monotonically decreasing. It's safe 5832a0a1a5fdSTejun Heo * to peek without lock. 5833a0a1a5fdSTejun Heo */ 583424acfb71SThomas Gleixner rcu_read_lock(); 583524b8a847STejun Heo for_each_pwq(pwq, wq) { 58366183c009STejun Heo WARN_ON_ONCE(pwq->nr_active < 0); 5837112202d9STejun Heo if (pwq->nr_active) { 5838a0a1a5fdSTejun Heo busy = true; 583924acfb71SThomas Gleixner rcu_read_unlock(); 5840a0a1a5fdSTejun Heo goto out_unlock; 5841a0a1a5fdSTejun Heo } 5842a0a1a5fdSTejun Heo } 584324acfb71SThomas Gleixner rcu_read_unlock(); 5844a0a1a5fdSTejun Heo } 5845a0a1a5fdSTejun Heo out_unlock: 584668e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 5847a0a1a5fdSTejun Heo return busy; 5848a0a1a5fdSTejun Heo } 5849a0a1a5fdSTejun Heo 5850a0a1a5fdSTejun Heo /** 5851a0a1a5fdSTejun Heo * thaw_workqueues - thaw workqueues 5852a0a1a5fdSTejun Heo * 5853a0a1a5fdSTejun Heo * Thaw workqueues. Normal queueing is restored and all collected 5854706026c2STejun Heo * frozen works are transferred to their respective pool worklists. 5855a0a1a5fdSTejun Heo * 5856a0a1a5fdSTejun Heo * CONTEXT: 5857a357fc03SLai Jiangshan * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
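 *
 * Rough sketch of how the freezer drives these three entry points
 * (simplified; see kernel/power/process.c for the real sequencing):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	... suspend or hibernate work ...
 *	thaw_workqueues();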
5858a0a1a5fdSTejun Heo */ 5859a0a1a5fdSTejun Heo void thaw_workqueues(void) 5860a0a1a5fdSTejun Heo { 586124b8a847STejun Heo struct workqueue_struct *wq; 5862a0a1a5fdSTejun Heo 586368e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 5864a0a1a5fdSTejun Heo 5865a0a1a5fdSTejun Heo if (!workqueue_freezing) 5866a0a1a5fdSTejun Heo goto out_unlock; 5867a0a1a5fdSTejun Heo 586874b414eaSLai Jiangshan workqueue_freezing = false; 586924b8a847STejun Heo 587024b8a847STejun Heo /* restore max_active and repopulate worklist */ 587124b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 5872a357fc03SLai Jiangshan mutex_lock(&wq->mutex); 587382e098f5STejun Heo wq_adjust_max_active(wq); 5874a357fc03SLai Jiangshan mutex_unlock(&wq->mutex); 587524b8a847STejun Heo } 587624b8a847STejun Heo 5877a0a1a5fdSTejun Heo out_unlock: 587868e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 5879a0a1a5fdSTejun Heo } 5880a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */ 5881a0a1a5fdSTejun Heo 588299c621efSLai Jiangshan static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) 5883042f7df1SLai Jiangshan { 5884042f7df1SLai Jiangshan LIST_HEAD(ctxs); 5885042f7df1SLai Jiangshan int ret = 0; 5886042f7df1SLai Jiangshan struct workqueue_struct *wq; 5887042f7df1SLai Jiangshan struct apply_wqattrs_ctx *ctx, *n; 5888042f7df1SLai Jiangshan 5889042f7df1SLai Jiangshan lockdep_assert_held(&wq_pool_mutex); 5890042f7df1SLai Jiangshan 5891042f7df1SLai Jiangshan list_for_each_entry(wq, &workqueues, list) { 5892042f7df1SLai Jiangshan if (!(wq->flags & WQ_UNBOUND)) 5893042f7df1SLai Jiangshan continue; 5894042f7df1SLai Jiangshan /* creating multiple pwqs breaks ordering guarantee */ 58955ad73e10STejun Heo if (wq->flags & __WQ_ORDERED) 5896042f7df1SLai Jiangshan continue; 5897042f7df1SLai Jiangshan 589899c621efSLai Jiangshan ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); 589984193c07STejun Heo if (IS_ERR(ctx)) { 590084193c07STejun Heo ret = PTR_ERR(ctx); 5901042f7df1SLai Jiangshan break; 5902042f7df1SLai Jiangshan } 5903042f7df1SLai Jiangshan 5904042f7df1SLai Jiangshan list_add_tail(&ctx->list, &ctxs); 5905042f7df1SLai Jiangshan } 5906042f7df1SLai Jiangshan 5907042f7df1SLai Jiangshan list_for_each_entry_safe(ctx, n, &ctxs, list) { 5908042f7df1SLai Jiangshan if (!ret) 5909042f7df1SLai Jiangshan apply_wqattrs_commit(ctx); 5910042f7df1SLai Jiangshan apply_wqattrs_cleanup(ctx); 5911042f7df1SLai Jiangshan } 5912042f7df1SLai Jiangshan 591399c621efSLai Jiangshan if (!ret) { 591499c621efSLai Jiangshan mutex_lock(&wq_pool_attach_mutex); 591599c621efSLai Jiangshan cpumask_copy(wq_unbound_cpumask, unbound_cpumask); 591699c621efSLai Jiangshan mutex_unlock(&wq_pool_attach_mutex); 591799c621efSLai Jiangshan } 5918042f7df1SLai Jiangshan return ret; 5919042f7df1SLai Jiangshan } 5920042f7df1SLai Jiangshan 5921042f7df1SLai Jiangshan /** 5922042f7df1SLai Jiangshan * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask 5923042f7df1SLai Jiangshan * @cpumask: the cpumask to set 5924042f7df1SLai Jiangshan * 5925042f7df1SLai Jiangshan * The low-level workqueues cpumask is a global cpumask that limits 5926042f7df1SLai Jiangshan * the affinity of all unbound workqueues. This function check the @cpumask 5927042f7df1SLai Jiangshan * and apply it to all unbound workqueues and updates all pwqs of them. 5928042f7df1SLai Jiangshan * 592967dc8325SCai Huoqing * Return: 0 - Success 5930042f7df1SLai Jiangshan * -EINVAL - Invalid @cpumask 5931042f7df1SLai Jiangshan * -ENOMEM - Failed to allocate memory for attrs or pwqs. 
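 *
 * Illustrative in-kernel call (userspace normally goes through the
 * sysfs file /sys/devices/virtual/workqueue/cpumask instead):
 *
 *	cpumask_var_t mask;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_copy(mask, cpumask_of(0));
 *		workqueue_set_unbound_cpumask(mask);
 *		free_cpumask_var(mask);
 *	}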
5932042f7df1SLai Jiangshan */ 5933042f7df1SLai Jiangshan int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) 5934042f7df1SLai Jiangshan { 5935042f7df1SLai Jiangshan int ret = -EINVAL; 5936042f7df1SLai Jiangshan 5937c98a9805STal Shorer /* 5938c98a9805STal Shorer * Not excluding isolated cpus on purpose. 5939c98a9805STal Shorer * If the user wishes to include them, we allow that. 5940c98a9805STal Shorer */ 5941042f7df1SLai Jiangshan cpumask_and(cpumask, cpumask, cpu_possible_mask); 5942042f7df1SLai Jiangshan if (!cpumask_empty(cpumask)) { 5943a0111cf6SLai Jiangshan apply_wqattrs_lock(); 5944d25302e4SMenglong Dong if (cpumask_equal(cpumask, wq_unbound_cpumask)) { 5945d25302e4SMenglong Dong ret = 0; 5946d25302e4SMenglong Dong goto out_unlock; 5947d25302e4SMenglong Dong } 5948d25302e4SMenglong Dong 594999c621efSLai Jiangshan ret = workqueue_apply_unbound_cpumask(cpumask); 5950042f7df1SLai Jiangshan 5951d25302e4SMenglong Dong out_unlock: 5952a0111cf6SLai Jiangshan apply_wqattrs_unlock(); 5953042f7df1SLai Jiangshan } 5954042f7df1SLai Jiangshan 5955042f7df1SLai Jiangshan return ret; 5956042f7df1SLai Jiangshan } 5957042f7df1SLai Jiangshan 595863c5484eSTejun Heo static int parse_affn_scope(const char *val) 595963c5484eSTejun Heo { 596063c5484eSTejun Heo int i; 596163c5484eSTejun Heo 596263c5484eSTejun Heo for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) { 596363c5484eSTejun Heo if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i]))) 596463c5484eSTejun Heo return i; 596563c5484eSTejun Heo } 596663c5484eSTejun Heo return -EINVAL; 596763c5484eSTejun Heo } 596863c5484eSTejun Heo 596963c5484eSTejun Heo static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp) 597063c5484eSTejun Heo { 5971523a301eSTejun Heo struct workqueue_struct *wq; 5972523a301eSTejun Heo int affn, cpu; 597363c5484eSTejun Heo 597463c5484eSTejun Heo affn = parse_affn_scope(val); 597563c5484eSTejun Heo if (affn < 0) 597663c5484eSTejun Heo return affn; 5977523a301eSTejun Heo if (affn == WQ_AFFN_DFL) 5978523a301eSTejun Heo return -EINVAL; 5979523a301eSTejun Heo 5980523a301eSTejun Heo cpus_read_lock(); 5981523a301eSTejun Heo mutex_lock(&wq_pool_mutex); 598263c5484eSTejun Heo 598363c5484eSTejun Heo wq_affn_dfl = affn; 5984523a301eSTejun Heo 5985523a301eSTejun Heo list_for_each_entry(wq, &workqueues, list) { 5986523a301eSTejun Heo for_each_online_cpu(cpu) { 5987523a301eSTejun Heo wq_update_pod(wq, cpu, cpu, true); 5988523a301eSTejun Heo } 5989523a301eSTejun Heo } 5990523a301eSTejun Heo 5991523a301eSTejun Heo mutex_unlock(&wq_pool_mutex); 5992523a301eSTejun Heo cpus_read_unlock(); 5993523a301eSTejun Heo 599463c5484eSTejun Heo return 0; 599563c5484eSTejun Heo } 599663c5484eSTejun Heo 599763c5484eSTejun Heo static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp) 599863c5484eSTejun Heo { 599963c5484eSTejun Heo return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]); 600063c5484eSTejun Heo } 600163c5484eSTejun Heo 600263c5484eSTejun Heo static const struct kernel_param_ops wq_affn_dfl_ops = { 600363c5484eSTejun Heo .set = wq_affn_dfl_set, 600463c5484eSTejun Heo .get = wq_affn_dfl_get, 600563c5484eSTejun Heo }; 600663c5484eSTejun Heo 600763c5484eSTejun Heo module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644); 600863c5484eSTejun Heo 60096ba94429SFrederic Weisbecker #ifdef CONFIG_SYSFS 60106ba94429SFrederic Weisbecker /* 60116ba94429SFrederic Weisbecker * Workqueues with WQ_SYSFS flag set is visible to userland via 60126ba94429SFrederic Weisbecker * 
/sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 60136ba94429SFrederic Weisbecker * following attributes. 60146ba94429SFrederic Weisbecker * 60156ba94429SFrederic Weisbecker * per_cpu RO bool : whether the workqueue is per-cpu or unbound 60166ba94429SFrederic Weisbecker * max_active RW int : maximum number of in-flight work items 60176ba94429SFrederic Weisbecker * 60186ba94429SFrederic Weisbecker * Unbound workqueues have the following extra attributes. 60196ba94429SFrederic Weisbecker * 60206ba94429SFrederic Weisbecker * nice RW int : nice value of the workers 60216ba94429SFrederic Weisbecker * cpumask RW mask : bitmask of allowed CPUs for the workers 602263c5484eSTejun Heo * affinity_scope RW str : worker CPU affinity scope (cache, numa, none) 60238639ecebSTejun Heo * affinity_strict RW bool : worker CPU affinity is strict 60246ba94429SFrederic Weisbecker */ 60256ba94429SFrederic Weisbecker struct wq_device { 60266ba94429SFrederic Weisbecker struct workqueue_struct *wq; 60276ba94429SFrederic Weisbecker struct device dev; 60286ba94429SFrederic Weisbecker }; 60296ba94429SFrederic Weisbecker 60306ba94429SFrederic Weisbecker static struct workqueue_struct *dev_to_wq(struct device *dev) 60316ba94429SFrederic Weisbecker { 60326ba94429SFrederic Weisbecker struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 60336ba94429SFrederic Weisbecker 60346ba94429SFrederic Weisbecker return wq_dev->wq; 60356ba94429SFrederic Weisbecker } 60366ba94429SFrederic Weisbecker 60376ba94429SFrederic Weisbecker static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 60386ba94429SFrederic Weisbecker char *buf) 60396ba94429SFrederic Weisbecker { 60406ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 60416ba94429SFrederic Weisbecker 60426ba94429SFrederic Weisbecker return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 60436ba94429SFrederic Weisbecker } 60446ba94429SFrederic Weisbecker static DEVICE_ATTR_RO(per_cpu); 60456ba94429SFrederic Weisbecker 60466ba94429SFrederic Weisbecker static ssize_t max_active_show(struct device *dev, 60476ba94429SFrederic Weisbecker struct device_attribute *attr, char *buf) 60486ba94429SFrederic Weisbecker { 60496ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 60506ba94429SFrederic Weisbecker 60516ba94429SFrederic Weisbecker return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 60526ba94429SFrederic Weisbecker } 60536ba94429SFrederic Weisbecker 60546ba94429SFrederic Weisbecker static ssize_t max_active_store(struct device *dev, 60556ba94429SFrederic Weisbecker struct device_attribute *attr, const char *buf, 60566ba94429SFrederic Weisbecker size_t count) 60576ba94429SFrederic Weisbecker { 60586ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 60596ba94429SFrederic Weisbecker int val; 60606ba94429SFrederic Weisbecker 60616ba94429SFrederic Weisbecker if (sscanf(buf, "%d", &val) != 1 || val <= 0) 60626ba94429SFrederic Weisbecker return -EINVAL; 60636ba94429SFrederic Weisbecker 60646ba94429SFrederic Weisbecker workqueue_set_max_active(wq, val); 60656ba94429SFrederic Weisbecker return count; 60666ba94429SFrederic Weisbecker } 60676ba94429SFrederic Weisbecker static DEVICE_ATTR_RW(max_active); 60686ba94429SFrederic Weisbecker 60696ba94429SFrederic Weisbecker static struct attribute *wq_sysfs_attrs[] = { 60706ba94429SFrederic Weisbecker &dev_attr_per_cpu.attr, 60716ba94429SFrederic Weisbecker &dev_attr_max_active.attr, 
60726ba94429SFrederic Weisbecker NULL, 60736ba94429SFrederic Weisbecker }; 60746ba94429SFrederic Weisbecker ATTRIBUTE_GROUPS(wq_sysfs); 60756ba94429SFrederic Weisbecker 60766ba94429SFrederic Weisbecker static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 60776ba94429SFrederic Weisbecker char *buf) 60786ba94429SFrederic Weisbecker { 60796ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 60806ba94429SFrederic Weisbecker int written; 60816ba94429SFrederic Weisbecker 60826ba94429SFrederic Weisbecker mutex_lock(&wq->mutex); 60836ba94429SFrederic Weisbecker written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 60846ba94429SFrederic Weisbecker mutex_unlock(&wq->mutex); 60856ba94429SFrederic Weisbecker 60866ba94429SFrederic Weisbecker return written; 60876ba94429SFrederic Weisbecker } 60886ba94429SFrederic Weisbecker 60896ba94429SFrederic Weisbecker /* prepare workqueue_attrs for sysfs store operations */ 60906ba94429SFrederic Weisbecker static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 60916ba94429SFrederic Weisbecker { 60926ba94429SFrederic Weisbecker struct workqueue_attrs *attrs; 60936ba94429SFrederic Weisbecker 6094899a94feSLai Jiangshan lockdep_assert_held(&wq_pool_mutex); 6095899a94feSLai Jiangshan 6096be69d00dSThomas Gleixner attrs = alloc_workqueue_attrs(); 60976ba94429SFrederic Weisbecker if (!attrs) 60986ba94429SFrederic Weisbecker return NULL; 60996ba94429SFrederic Weisbecker 61006ba94429SFrederic Weisbecker copy_workqueue_attrs(attrs, wq->unbound_attrs); 61016ba94429SFrederic Weisbecker return attrs; 61026ba94429SFrederic Weisbecker } 61036ba94429SFrederic Weisbecker 61046ba94429SFrederic Weisbecker static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 61056ba94429SFrederic Weisbecker const char *buf, size_t count) 61066ba94429SFrederic Weisbecker { 61076ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 61086ba94429SFrederic Weisbecker struct workqueue_attrs *attrs; 6109d4d3e257SLai Jiangshan int ret = -ENOMEM; 6110d4d3e257SLai Jiangshan 6111d4d3e257SLai Jiangshan apply_wqattrs_lock(); 61126ba94429SFrederic Weisbecker 61136ba94429SFrederic Weisbecker attrs = wq_sysfs_prep_attrs(wq); 61146ba94429SFrederic Weisbecker if (!attrs) 6115d4d3e257SLai Jiangshan goto out_unlock; 61166ba94429SFrederic Weisbecker 61176ba94429SFrederic Weisbecker if (sscanf(buf, "%d", &attrs->nice) == 1 && 61186ba94429SFrederic Weisbecker attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 6119d4d3e257SLai Jiangshan ret = apply_workqueue_attrs_locked(wq, attrs); 61206ba94429SFrederic Weisbecker else 61216ba94429SFrederic Weisbecker ret = -EINVAL; 61226ba94429SFrederic Weisbecker 6123d4d3e257SLai Jiangshan out_unlock: 6124d4d3e257SLai Jiangshan apply_wqattrs_unlock(); 61256ba94429SFrederic Weisbecker free_workqueue_attrs(attrs); 61266ba94429SFrederic Weisbecker return ret ?: count; 61276ba94429SFrederic Weisbecker } 61286ba94429SFrederic Weisbecker 61296ba94429SFrederic Weisbecker static ssize_t wq_cpumask_show(struct device *dev, 61306ba94429SFrederic Weisbecker struct device_attribute *attr, char *buf) 61316ba94429SFrederic Weisbecker { 61326ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 61336ba94429SFrederic Weisbecker int written; 61346ba94429SFrederic Weisbecker 61356ba94429SFrederic Weisbecker mutex_lock(&wq->mutex); 61366ba94429SFrederic Weisbecker written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 61376ba94429SFrederic Weisbecker 
cpumask_pr_args(wq->unbound_attrs->cpumask)); 61386ba94429SFrederic Weisbecker mutex_unlock(&wq->mutex); 61396ba94429SFrederic Weisbecker return written; 61406ba94429SFrederic Weisbecker } 61416ba94429SFrederic Weisbecker 61426ba94429SFrederic Weisbecker static ssize_t wq_cpumask_store(struct device *dev, 61436ba94429SFrederic Weisbecker struct device_attribute *attr, 61446ba94429SFrederic Weisbecker const char *buf, size_t count) 61456ba94429SFrederic Weisbecker { 61466ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 61476ba94429SFrederic Weisbecker struct workqueue_attrs *attrs; 6148d4d3e257SLai Jiangshan int ret = -ENOMEM; 6149d4d3e257SLai Jiangshan 6150d4d3e257SLai Jiangshan apply_wqattrs_lock(); 61516ba94429SFrederic Weisbecker 61526ba94429SFrederic Weisbecker attrs = wq_sysfs_prep_attrs(wq); 61536ba94429SFrederic Weisbecker if (!attrs) 6154d4d3e257SLai Jiangshan goto out_unlock; 61556ba94429SFrederic Weisbecker 61566ba94429SFrederic Weisbecker ret = cpumask_parse(buf, attrs->cpumask); 61576ba94429SFrederic Weisbecker if (!ret) 6158d4d3e257SLai Jiangshan ret = apply_workqueue_attrs_locked(wq, attrs); 61596ba94429SFrederic Weisbecker 6160d4d3e257SLai Jiangshan out_unlock: 6161d4d3e257SLai Jiangshan apply_wqattrs_unlock(); 61626ba94429SFrederic Weisbecker free_workqueue_attrs(attrs); 61636ba94429SFrederic Weisbecker return ret ?: count; 61646ba94429SFrederic Weisbecker } 61656ba94429SFrederic Weisbecker 616663c5484eSTejun Heo static ssize_t wq_affn_scope_show(struct device *dev, 616763c5484eSTejun Heo struct device_attribute *attr, char *buf) 616863c5484eSTejun Heo { 616963c5484eSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 617063c5484eSTejun Heo int written; 617163c5484eSTejun Heo 617263c5484eSTejun Heo mutex_lock(&wq->mutex); 6173523a301eSTejun Heo if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL) 6174523a301eSTejun Heo written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n", 6175523a301eSTejun Heo wq_affn_names[WQ_AFFN_DFL], 6176523a301eSTejun Heo wq_affn_names[wq_affn_dfl]); 6177523a301eSTejun Heo else 617863c5484eSTejun Heo written = scnprintf(buf, PAGE_SIZE, "%s\n", 617963c5484eSTejun Heo wq_affn_names[wq->unbound_attrs->affn_scope]); 618063c5484eSTejun Heo mutex_unlock(&wq->mutex); 618163c5484eSTejun Heo 618263c5484eSTejun Heo return written; 618363c5484eSTejun Heo } 618463c5484eSTejun Heo 618563c5484eSTejun Heo static ssize_t wq_affn_scope_store(struct device *dev, 618663c5484eSTejun Heo struct device_attribute *attr, 618763c5484eSTejun Heo const char *buf, size_t count) 618863c5484eSTejun Heo { 618963c5484eSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 619063c5484eSTejun Heo struct workqueue_attrs *attrs; 619163c5484eSTejun Heo int affn, ret = -ENOMEM; 619263c5484eSTejun Heo 619363c5484eSTejun Heo affn = parse_affn_scope(buf); 619463c5484eSTejun Heo if (affn < 0) 619563c5484eSTejun Heo return affn; 619663c5484eSTejun Heo 619763c5484eSTejun Heo apply_wqattrs_lock(); 619863c5484eSTejun Heo attrs = wq_sysfs_prep_attrs(wq); 619963c5484eSTejun Heo if (attrs) { 620063c5484eSTejun Heo attrs->affn_scope = affn; 620163c5484eSTejun Heo ret = apply_workqueue_attrs_locked(wq, attrs); 620263c5484eSTejun Heo } 620363c5484eSTejun Heo apply_wqattrs_unlock(); 620463c5484eSTejun Heo free_workqueue_attrs(attrs); 620563c5484eSTejun Heo return ret ?: count; 620663c5484eSTejun Heo } 620763c5484eSTejun Heo 62088639ecebSTejun Heo static ssize_t wq_affinity_strict_show(struct device *dev, 62098639ecebSTejun Heo struct device_attribute *attr, char *buf) 
62108639ecebSTejun Heo { 62118639ecebSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 62128639ecebSTejun Heo 62138639ecebSTejun Heo return scnprintf(buf, PAGE_SIZE, "%d\n", 62148639ecebSTejun Heo wq->unbound_attrs->affn_strict); 62158639ecebSTejun Heo } 62168639ecebSTejun Heo 62178639ecebSTejun Heo static ssize_t wq_affinity_strict_store(struct device *dev, 62188639ecebSTejun Heo struct device_attribute *attr, 62198639ecebSTejun Heo const char *buf, size_t count) 62208639ecebSTejun Heo { 62218639ecebSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 62228639ecebSTejun Heo struct workqueue_attrs *attrs; 62238639ecebSTejun Heo int v, ret = -ENOMEM; 62248639ecebSTejun Heo 62258639ecebSTejun Heo if (sscanf(buf, "%d", &v) != 1) 62268639ecebSTejun Heo return -EINVAL; 62278639ecebSTejun Heo 62288639ecebSTejun Heo apply_wqattrs_lock(); 62298639ecebSTejun Heo attrs = wq_sysfs_prep_attrs(wq); 62308639ecebSTejun Heo if (attrs) { 62318639ecebSTejun Heo attrs->affn_strict = (bool)v; 62328639ecebSTejun Heo ret = apply_workqueue_attrs_locked(wq, attrs); 62338639ecebSTejun Heo } 62348639ecebSTejun Heo apply_wqattrs_unlock(); 62358639ecebSTejun Heo free_workqueue_attrs(attrs); 62368639ecebSTejun Heo return ret ?: count; 62378639ecebSTejun Heo } 62388639ecebSTejun Heo 62396ba94429SFrederic Weisbecker static struct device_attribute wq_sysfs_unbound_attrs[] = { 62406ba94429SFrederic Weisbecker __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 62416ba94429SFrederic Weisbecker __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 624263c5484eSTejun Heo __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store), 62438639ecebSTejun Heo __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store), 62446ba94429SFrederic Weisbecker __ATTR_NULL, 62456ba94429SFrederic Weisbecker }; 62466ba94429SFrederic Weisbecker 62476ba94429SFrederic Weisbecker static struct bus_type wq_subsys = { 62486ba94429SFrederic Weisbecker .name = "workqueue", 62496ba94429SFrederic Weisbecker .dev_groups = wq_sysfs_groups, 62506ba94429SFrederic Weisbecker }; 62516ba94429SFrederic Weisbecker 6252b05a7928SFrederic Weisbecker static ssize_t wq_unbound_cpumask_show(struct device *dev, 6253b05a7928SFrederic Weisbecker struct device_attribute *attr, char *buf) 6254b05a7928SFrederic Weisbecker { 6255b05a7928SFrederic Weisbecker int written; 6256b05a7928SFrederic Weisbecker 6257042f7df1SLai Jiangshan mutex_lock(&wq_pool_mutex); 6258b05a7928SFrederic Weisbecker written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6259b05a7928SFrederic Weisbecker cpumask_pr_args(wq_unbound_cpumask)); 6260042f7df1SLai Jiangshan mutex_unlock(&wq_pool_mutex); 6261b05a7928SFrederic Weisbecker 6262b05a7928SFrederic Weisbecker return written; 6263b05a7928SFrederic Weisbecker } 6264b05a7928SFrederic Weisbecker 6265042f7df1SLai Jiangshan static ssize_t wq_unbound_cpumask_store(struct device *dev, 6266042f7df1SLai Jiangshan struct device_attribute *attr, const char *buf, size_t count) 6267042f7df1SLai Jiangshan { 6268042f7df1SLai Jiangshan cpumask_var_t cpumask; 6269042f7df1SLai Jiangshan int ret; 6270042f7df1SLai Jiangshan 6271042f7df1SLai Jiangshan if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 6272042f7df1SLai Jiangshan return -ENOMEM; 6273042f7df1SLai Jiangshan 6274042f7df1SLai Jiangshan ret = cpumask_parse(buf, cpumask); 6275042f7df1SLai Jiangshan if (!ret) 6276042f7df1SLai Jiangshan ret = workqueue_set_unbound_cpumask(cpumask); 6277042f7df1SLai Jiangshan 6278042f7df1SLai Jiangshan free_cpumask_var(cpumask); 6279042f7df1SLai 
Jiangshan return ret ? ret : count; 6280042f7df1SLai Jiangshan } 6281042f7df1SLai Jiangshan 6282b05a7928SFrederic Weisbecker static struct device_attribute wq_sysfs_cpumask_attr = 6283042f7df1SLai Jiangshan __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 6284042f7df1SLai Jiangshan wq_unbound_cpumask_store); 6285b05a7928SFrederic Weisbecker 62866ba94429SFrederic Weisbecker static int __init wq_sysfs_init(void) 62876ba94429SFrederic Weisbecker { 6288686f6697SGreg Kroah-Hartman struct device *dev_root; 6289b05a7928SFrederic Weisbecker int err; 6290b05a7928SFrederic Weisbecker 6291b05a7928SFrederic Weisbecker err = subsys_virtual_register(&wq_subsys, NULL); 6292b05a7928SFrederic Weisbecker if (err) 6293b05a7928SFrederic Weisbecker return err; 6294b05a7928SFrederic Weisbecker 6295686f6697SGreg Kroah-Hartman dev_root = bus_get_dev_root(&wq_subsys); 6296686f6697SGreg Kroah-Hartman if (dev_root) { 6297686f6697SGreg Kroah-Hartman err = device_create_file(dev_root, &wq_sysfs_cpumask_attr); 6298686f6697SGreg Kroah-Hartman put_device(dev_root); 6299686f6697SGreg Kroah-Hartman } 6300686f6697SGreg Kroah-Hartman return err; 63016ba94429SFrederic Weisbecker } 63026ba94429SFrederic Weisbecker core_initcall(wq_sysfs_init); 63036ba94429SFrederic Weisbecker 63046ba94429SFrederic Weisbecker static void wq_device_release(struct device *dev) 63056ba94429SFrederic Weisbecker { 63066ba94429SFrederic Weisbecker struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 63076ba94429SFrederic Weisbecker 63086ba94429SFrederic Weisbecker kfree(wq_dev); 63096ba94429SFrederic Weisbecker } 63106ba94429SFrederic Weisbecker 63116ba94429SFrederic Weisbecker /** 63126ba94429SFrederic Weisbecker * workqueue_sysfs_register - make a workqueue visible in sysfs 63136ba94429SFrederic Weisbecker * @wq: the workqueue to register 63146ba94429SFrederic Weisbecker * 63156ba94429SFrederic Weisbecker * Expose @wq in sysfs under /sys/bus/workqueue/devices. 63166ba94429SFrederic Weisbecker * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 63176ba94429SFrederic Weisbecker * which is the preferred method. 63186ba94429SFrederic Weisbecker * 63196ba94429SFrederic Weisbecker * Workqueue user should use this function directly iff it wants to apply 63206ba94429SFrederic Weisbecker * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 63216ba94429SFrederic Weisbecker * apply_workqueue_attrs() may race against userland updating the 63226ba94429SFrederic Weisbecker * attributes. 63236ba94429SFrederic Weisbecker * 63246ba94429SFrederic Weisbecker * Return: 0 on success, -errno on failure. 63256ba94429SFrederic Weisbecker */ 63266ba94429SFrederic Weisbecker int workqueue_sysfs_register(struct workqueue_struct *wq) 63276ba94429SFrederic Weisbecker { 63286ba94429SFrederic Weisbecker struct wq_device *wq_dev; 63296ba94429SFrederic Weisbecker int ret; 63306ba94429SFrederic Weisbecker 63316ba94429SFrederic Weisbecker /* 6332402dd89dSShailendra Verma * Adjusting max_active or creating new pwqs by applying 63336ba94429SFrederic Weisbecker * attributes breaks ordering guarantee. Disallow exposing ordered 63346ba94429SFrederic Weisbecker * workqueues. 
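 *
 * For example, a queue made with alloc_ordered_workqueue() carries
 * __WQ_ORDERED_EXPLICIT and is rejected below; something like
 * alloc_workqueue("my_wq", WQ_UNBOUND | WQ_SYSFS, 0) (hypothetical
 * name) is the intended way to get the attributes exposed under
 * /sys/bus/workqueue/devices/my_wq/.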
63356ba94429SFrederic Weisbecker */ 63360a94efb5STejun Heo if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 63376ba94429SFrederic Weisbecker return -EINVAL; 63386ba94429SFrederic Weisbecker 63396ba94429SFrederic Weisbecker wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 63406ba94429SFrederic Weisbecker if (!wq_dev) 63416ba94429SFrederic Weisbecker return -ENOMEM; 63426ba94429SFrederic Weisbecker 63436ba94429SFrederic Weisbecker wq_dev->wq = wq; 63446ba94429SFrederic Weisbecker wq_dev->dev.bus = &wq_subsys; 63456ba94429SFrederic Weisbecker wq_dev->dev.release = wq_device_release; 634623217b44SLars-Peter Clausen dev_set_name(&wq_dev->dev, "%s", wq->name); 63476ba94429SFrederic Weisbecker 63486ba94429SFrederic Weisbecker /* 63496ba94429SFrederic Weisbecker * unbound_attrs are created separately. Suppress uevent until 63506ba94429SFrederic Weisbecker * everything is ready. 63516ba94429SFrederic Weisbecker */ 63526ba94429SFrederic Weisbecker dev_set_uevent_suppress(&wq_dev->dev, true); 63536ba94429SFrederic Weisbecker 63546ba94429SFrederic Weisbecker ret = device_register(&wq_dev->dev); 63556ba94429SFrederic Weisbecker if (ret) { 6356537f4146SArvind Yadav put_device(&wq_dev->dev); 63576ba94429SFrederic Weisbecker wq->wq_dev = NULL; 63586ba94429SFrederic Weisbecker return ret; 63596ba94429SFrederic Weisbecker } 63606ba94429SFrederic Weisbecker 63616ba94429SFrederic Weisbecker if (wq->flags & WQ_UNBOUND) { 63626ba94429SFrederic Weisbecker struct device_attribute *attr; 63636ba94429SFrederic Weisbecker 63646ba94429SFrederic Weisbecker for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 63656ba94429SFrederic Weisbecker ret = device_create_file(&wq_dev->dev, attr); 63666ba94429SFrederic Weisbecker if (ret) { 63676ba94429SFrederic Weisbecker device_unregister(&wq_dev->dev); 63686ba94429SFrederic Weisbecker wq->wq_dev = NULL; 63696ba94429SFrederic Weisbecker return ret; 63706ba94429SFrederic Weisbecker } 63716ba94429SFrederic Weisbecker } 63726ba94429SFrederic Weisbecker } 63736ba94429SFrederic Weisbecker 63746ba94429SFrederic Weisbecker dev_set_uevent_suppress(&wq_dev->dev, false); 63756ba94429SFrederic Weisbecker kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 63766ba94429SFrederic Weisbecker return 0; 63776ba94429SFrederic Weisbecker } 63786ba94429SFrederic Weisbecker 63796ba94429SFrederic Weisbecker /** 63806ba94429SFrederic Weisbecker * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 63816ba94429SFrederic Weisbecker * @wq: the workqueue to unregister 63826ba94429SFrederic Weisbecker * 63836ba94429SFrederic Weisbecker * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 63846ba94429SFrederic Weisbecker */ 63856ba94429SFrederic Weisbecker static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 63866ba94429SFrederic Weisbecker { 63876ba94429SFrederic Weisbecker struct wq_device *wq_dev = wq->wq_dev; 63886ba94429SFrederic Weisbecker 63896ba94429SFrederic Weisbecker if (!wq->wq_dev) 63906ba94429SFrederic Weisbecker return; 63916ba94429SFrederic Weisbecker 63926ba94429SFrederic Weisbecker wq->wq_dev = NULL; 63936ba94429SFrederic Weisbecker device_unregister(&wq_dev->dev); 63946ba94429SFrederic Weisbecker } 63956ba94429SFrederic Weisbecker #else /* CONFIG_SYSFS */ 63966ba94429SFrederic Weisbecker static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 63976ba94429SFrederic Weisbecker #endif /* CONFIG_SYSFS */ 63986ba94429SFrederic Weisbecker 639982607adcSTejun Heo /* 640082607adcSTejun Heo * Workqueue watchdog. 
640182607adcSTejun Heo * 640282607adcSTejun Heo * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal 640382607adcSTejun Heo * flush dependency, a concurrency managed work item which stays RUNNING 640482607adcSTejun Heo * indefinitely. Workqueue stalls can be very difficult to debug as the 640582607adcSTejun Heo * usual warning mechanisms don't trigger and internal workqueue state is 640682607adcSTejun Heo * largely opaque. 640782607adcSTejun Heo * 640882607adcSTejun Heo * Workqueue watchdog monitors all worker pools periodically and dumps 640982607adcSTejun Heo * state if some pools failed to make forward progress for a while where 641082607adcSTejun Heo * forward progress is defined as the first item on ->worklist changing. 641182607adcSTejun Heo * 641282607adcSTejun Heo * This mechanism is controlled through the kernel parameter 641382607adcSTejun Heo * "workqueue.watchdog_thresh" which can be updated at runtime through the 641482607adcSTejun Heo * corresponding sysfs parameter file. 641582607adcSTejun Heo */ 641682607adcSTejun Heo #ifdef CONFIG_WQ_WATCHDOG 641782607adcSTejun Heo 641882607adcSTejun Heo static unsigned long wq_watchdog_thresh = 30; 64195cd79d6aSKees Cook static struct timer_list wq_watchdog_timer; 642082607adcSTejun Heo 642182607adcSTejun Heo static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 642282607adcSTejun Heo static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 642382607adcSTejun Heo 6424cd2440d6SPetr Mladek /* 6425cd2440d6SPetr Mladek * Show workers that might prevent the processing of pending work items. 6426cd2440d6SPetr Mladek * The only candidates are CPU-bound workers in the running state. 6427cd2440d6SPetr Mladek * Pending work items should be handled by another idle worker 6428cd2440d6SPetr Mladek * in all other situations. 6429cd2440d6SPetr Mladek */ 6430cd2440d6SPetr Mladek static void show_cpu_pool_hog(struct worker_pool *pool) 6431cd2440d6SPetr Mladek { 6432cd2440d6SPetr Mladek struct worker *worker; 6433cd2440d6SPetr Mladek unsigned long flags; 6434cd2440d6SPetr Mladek int bkt; 6435cd2440d6SPetr Mladek 6436cd2440d6SPetr Mladek raw_spin_lock_irqsave(&pool->lock, flags); 6437cd2440d6SPetr Mladek 6438cd2440d6SPetr Mladek hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6439cd2440d6SPetr Mladek if (task_is_running(worker->task)) { 6440cd2440d6SPetr Mladek /* 6441cd2440d6SPetr Mladek * Defer printing to avoid deadlocks in console 6442cd2440d6SPetr Mladek * drivers that queue work while holding locks 6443cd2440d6SPetr Mladek * also taken in their write paths. 
6444cd2440d6SPetr Mladek */ 6445cd2440d6SPetr Mladek printk_deferred_enter(); 6446cd2440d6SPetr Mladek 6447cd2440d6SPetr Mladek pr_info("pool %d:\n", pool->id); 6448cd2440d6SPetr Mladek sched_show_task(worker->task); 6449cd2440d6SPetr Mladek 6450cd2440d6SPetr Mladek printk_deferred_exit(); 6451cd2440d6SPetr Mladek } 6452cd2440d6SPetr Mladek } 6453cd2440d6SPetr Mladek 6454cd2440d6SPetr Mladek raw_spin_unlock_irqrestore(&pool->lock, flags); 6455cd2440d6SPetr Mladek } 6456cd2440d6SPetr Mladek 6457cd2440d6SPetr Mladek static void show_cpu_pools_hogs(void) 6458cd2440d6SPetr Mladek { 6459cd2440d6SPetr Mladek struct worker_pool *pool; 6460cd2440d6SPetr Mladek int pi; 6461cd2440d6SPetr Mladek 6462cd2440d6SPetr Mladek pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 6463cd2440d6SPetr Mladek 6464cd2440d6SPetr Mladek rcu_read_lock(); 6465cd2440d6SPetr Mladek 6466cd2440d6SPetr Mladek for_each_pool(pool, pi) { 6467cd2440d6SPetr Mladek if (pool->cpu_stall) 6468cd2440d6SPetr Mladek show_cpu_pool_hog(pool); 6469cd2440d6SPetr Mladek 6470cd2440d6SPetr Mladek } 6471cd2440d6SPetr Mladek 6472cd2440d6SPetr Mladek rcu_read_unlock(); 6473cd2440d6SPetr Mladek } 6474cd2440d6SPetr Mladek 647582607adcSTejun Heo static void wq_watchdog_reset_touched(void) 647682607adcSTejun Heo { 647782607adcSTejun Heo int cpu; 647882607adcSTejun Heo 647982607adcSTejun Heo wq_watchdog_touched = jiffies; 648082607adcSTejun Heo for_each_possible_cpu(cpu) 648182607adcSTejun Heo per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 648282607adcSTejun Heo } 648382607adcSTejun Heo 64845cd79d6aSKees Cook static void wq_watchdog_timer_fn(struct timer_list *unused) 648582607adcSTejun Heo { 648682607adcSTejun Heo unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 648782607adcSTejun Heo bool lockup_detected = false; 6488cd2440d6SPetr Mladek bool cpu_pool_stall = false; 6489940d71c6SSergey Senozhatsky unsigned long now = jiffies; 649082607adcSTejun Heo struct worker_pool *pool; 649182607adcSTejun Heo int pi; 649282607adcSTejun Heo 649382607adcSTejun Heo if (!thresh) 649482607adcSTejun Heo return; 649582607adcSTejun Heo 649682607adcSTejun Heo rcu_read_lock(); 649782607adcSTejun Heo 649882607adcSTejun Heo for_each_pool(pool, pi) { 649982607adcSTejun Heo unsigned long pool_ts, touched, ts; 650082607adcSTejun Heo 6501cd2440d6SPetr Mladek pool->cpu_stall = false; 650282607adcSTejun Heo if (list_empty(&pool->worklist)) 650382607adcSTejun Heo continue; 650482607adcSTejun Heo 6505940d71c6SSergey Senozhatsky /* 6506940d71c6SSergey Senozhatsky * If a virtual machine is stopped by the host it can look to 6507940d71c6SSergey Senozhatsky * the watchdog like a stall. 6508940d71c6SSergey Senozhatsky */ 6509940d71c6SSergey Senozhatsky kvm_check_and_clear_guest_paused(); 6510940d71c6SSergey Senozhatsky 651182607adcSTejun Heo /* get the latest of pool and touched timestamps */ 651289e28ce6SWang Qing if (pool->cpu >= 0) 651389e28ce6SWang Qing touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 651489e28ce6SWang Qing else 651582607adcSTejun Heo touched = READ_ONCE(wq_watchdog_touched); 651689e28ce6SWang Qing pool_ts = READ_ONCE(pool->watchdog_ts); 651782607adcSTejun Heo 651882607adcSTejun Heo if (time_after(pool_ts, touched)) 651982607adcSTejun Heo ts = pool_ts; 652082607adcSTejun Heo else 652182607adcSTejun Heo ts = touched; 652282607adcSTejun Heo 652382607adcSTejun Heo /* did we stall? 
*/ 6524940d71c6SSergey Senozhatsky if (time_after(now, ts + thresh)) { 652582607adcSTejun Heo lockup_detected = true; 6526cd2440d6SPetr Mladek if (pool->cpu >= 0) { 6527cd2440d6SPetr Mladek pool->cpu_stall = true; 6528cd2440d6SPetr Mladek cpu_pool_stall = true; 6529cd2440d6SPetr Mladek } 653082607adcSTejun Heo pr_emerg("BUG: workqueue lockup - pool"); 653182607adcSTejun Heo pr_cont_pool_info(pool); 653282607adcSTejun Heo pr_cont(" stuck for %us!\n", 6533940d71c6SSergey Senozhatsky jiffies_to_msecs(now - pool_ts) / 1000); 653482607adcSTejun Heo } 6535cd2440d6SPetr Mladek 6536cd2440d6SPetr Mladek 653782607adcSTejun Heo } 653882607adcSTejun Heo 653982607adcSTejun Heo rcu_read_unlock(); 654082607adcSTejun Heo 654182607adcSTejun Heo if (lockup_detected) 654255df0933SImran Khan show_all_workqueues(); 654382607adcSTejun Heo 6544cd2440d6SPetr Mladek if (cpu_pool_stall) 6545cd2440d6SPetr Mladek show_cpu_pools_hogs(); 6546cd2440d6SPetr Mladek 654782607adcSTejun Heo wq_watchdog_reset_touched(); 654882607adcSTejun Heo mod_timer(&wq_watchdog_timer, jiffies + thresh); 654982607adcSTejun Heo } 655082607adcSTejun Heo 6551cb9d7fd5SVincent Whitchurch notrace void wq_watchdog_touch(int cpu) 655282607adcSTejun Heo { 655382607adcSTejun Heo if (cpu >= 0) 655482607adcSTejun Heo per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 655589e28ce6SWang Qing 655682607adcSTejun Heo wq_watchdog_touched = jiffies; 655782607adcSTejun Heo } 655882607adcSTejun Heo 655982607adcSTejun Heo static void wq_watchdog_set_thresh(unsigned long thresh) 656082607adcSTejun Heo { 656182607adcSTejun Heo wq_watchdog_thresh = 0; 656282607adcSTejun Heo del_timer_sync(&wq_watchdog_timer); 656382607adcSTejun Heo 656482607adcSTejun Heo if (thresh) { 656582607adcSTejun Heo wq_watchdog_thresh = thresh; 656682607adcSTejun Heo wq_watchdog_reset_touched(); 656782607adcSTejun Heo mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ); 656882607adcSTejun Heo } 656982607adcSTejun Heo } 657082607adcSTejun Heo 657182607adcSTejun Heo static int wq_watchdog_param_set_thresh(const char *val, 657282607adcSTejun Heo const struct kernel_param *kp) 657382607adcSTejun Heo { 657482607adcSTejun Heo unsigned long thresh; 657582607adcSTejun Heo int ret; 657682607adcSTejun Heo 657782607adcSTejun Heo ret = kstrtoul(val, 0, &thresh); 657882607adcSTejun Heo if (ret) 657982607adcSTejun Heo return ret; 658082607adcSTejun Heo 658182607adcSTejun Heo if (system_wq) 658282607adcSTejun Heo wq_watchdog_set_thresh(thresh); 658382607adcSTejun Heo else 658482607adcSTejun Heo wq_watchdog_thresh = thresh; 658582607adcSTejun Heo 658682607adcSTejun Heo return 0; 658782607adcSTejun Heo } 658882607adcSTejun Heo 658982607adcSTejun Heo static const struct kernel_param_ops wq_watchdog_thresh_ops = { 659082607adcSTejun Heo .set = wq_watchdog_param_set_thresh, 659182607adcSTejun Heo .get = param_get_ulong, 659282607adcSTejun Heo }; 659382607adcSTejun Heo 659482607adcSTejun Heo module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh, 659582607adcSTejun Heo 0644); 659682607adcSTejun Heo 659782607adcSTejun Heo static void wq_watchdog_init(void) 659882607adcSTejun Heo { 65995cd79d6aSKees Cook timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 660082607adcSTejun Heo wq_watchdog_set_thresh(wq_watchdog_thresh); 660182607adcSTejun Heo } 660282607adcSTejun Heo 660382607adcSTejun Heo #else /* CONFIG_WQ_WATCHDOG */ 660482607adcSTejun Heo 660582607adcSTejun Heo static inline void wq_watchdog_init(void) { } 660682607adcSTejun Heo 660782607adcSTejun Heo #endif /* 
CONFIG_WQ_WATCHDOG */ 660882607adcSTejun Heo 6609b2c562a7STejun Heo static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask) 6610b2c562a7STejun Heo { 6611b2c562a7STejun Heo if (!cpumask_intersects(wq_unbound_cpumask, mask)) { 6612b2c562a7STejun Heo pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n", 6613b2c562a7STejun Heo cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask)); 6614b2c562a7STejun Heo return; 6615b2c562a7STejun Heo } 6616b2c562a7STejun Heo 6617b2c562a7STejun Heo cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask); 6618b2c562a7STejun Heo } 6619b2c562a7STejun Heo 66203347fa09STejun Heo /** 66213347fa09STejun Heo * workqueue_init_early - early init for workqueue subsystem 66223347fa09STejun Heo * 66232930155bSTejun Heo * This is the first step of three-staged workqueue subsystem initialization and 66242930155bSTejun Heo * invoked as soon as the bare basics - memory allocation, cpumasks and idr are 66252930155bSTejun Heo * up. It sets up all the data structures and system workqueues and allows early 66262930155bSTejun Heo * boot code to create workqueues and queue/cancel work items. Actual work item 66272930155bSTejun Heo * execution starts only after kthreads can be created and scheduled right 66282930155bSTejun Heo * before early initcalls. 66293347fa09STejun Heo */ 66302333e829SYu Chen void __init workqueue_init_early(void) 66311da177e4SLinus Torvalds { 663284193c07STejun Heo struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 66337a4e344cSTejun Heo int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 66347a4e344cSTejun Heo int i, cpu; 6635c34056a3STejun Heo 663610cdb157SLai Jiangshan BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 6637e904e6c2STejun Heo 6638b05a7928SFrederic Weisbecker BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 6639b2c562a7STejun Heo cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); 6640b2c562a7STejun Heo restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ)); 6641b2c562a7STejun Heo restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN)); 6642ace3c549Stiozhang if (!cpumask_empty(&wq_cmdline_cpumask)) 6643b2c562a7STejun Heo restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask); 6644ace3c549Stiozhang 6645e904e6c2STejun Heo pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 6646e904e6c2STejun Heo 66472930155bSTejun Heo wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 66482930155bSTejun Heo BUG_ON(!wq_update_pod_attrs_buf); 66492930155bSTejun Heo 665084193c07STejun Heo /* initialize WQ_AFFN_SYSTEM pods */ 665184193c07STejun Heo pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 665284193c07STejun Heo pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); 665384193c07STejun Heo pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 665484193c07STejun Heo BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); 665584193c07STejun Heo 665684193c07STejun Heo BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); 665784193c07STejun Heo 665884193c07STejun Heo pt->nr_pods = 1; 665984193c07STejun Heo cpumask_copy(pt->pod_cpus[0], cpu_possible_mask); 666084193c07STejun Heo pt->pod_node[0] = NUMA_NO_NODE; 666184193c07STejun Heo pt->cpu_pod[0] = 0; 666284193c07STejun Heo 6663706026c2STejun Heo /* initialize CPU pools */ 666429c91e99STejun Heo for_each_possible_cpu(cpu) { 66654ce62e9eSTejun Heo 
struct worker_pool *pool; 66668b03ae3cSTejun Heo 66677a4e344cSTejun Heo i = 0; 6668f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 66697a4e344cSTejun Heo BUG_ON(init_worker_pool(pool)); 6670ec22ca5eSTejun Heo pool->cpu = cpu; 66717a4e344cSTejun Heo cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 66729546b29eSTejun Heo cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); 66737a4e344cSTejun Heo pool->attrs->nice = std_nice[i++]; 66748639ecebSTejun Heo pool->attrs->affn_strict = true; 6675f3f90ad4STejun Heo pool->node = cpu_to_node(cpu); 66767a4e344cSTejun Heo 66779daf9e67STejun Heo /* alloc pool ID */ 667868e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 66799daf9e67STejun Heo BUG_ON(worker_pool_assign_id(pool)); 668068e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 66814ce62e9eSTejun Heo } 66828b03ae3cSTejun Heo } 66838b03ae3cSTejun Heo 66848a2b7538STejun Heo /* create default unbound and ordered wq attrs */ 668529c91e99STejun Heo for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 668629c91e99STejun Heo struct workqueue_attrs *attrs; 668729c91e99STejun Heo 6688be69d00dSThomas Gleixner BUG_ON(!(attrs = alloc_workqueue_attrs())); 668929c91e99STejun Heo attrs->nice = std_nice[i]; 669029c91e99STejun Heo unbound_std_wq_attrs[i] = attrs; 66918a2b7538STejun Heo 66928a2b7538STejun Heo /* 66938a2b7538STejun Heo * An ordered wq should have only one pwq as ordering is 66948a2b7538STejun Heo * guaranteed by max_active which is enforced by pwqs. 66958a2b7538STejun Heo */ 6696be69d00dSThomas Gleixner BUG_ON(!(attrs = alloc_workqueue_attrs())); 66978a2b7538STejun Heo attrs->nice = std_nice[i]; 6698af73f5c9STejun Heo attrs->ordered = true; 66998a2b7538STejun Heo ordered_wq_attrs[i] = attrs; 670029c91e99STejun Heo } 670129c91e99STejun Heo 6702d320c038STejun Heo system_wq = alloc_workqueue("events", 0, 0); 67031aabe902SJoonsoo Kim system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 6704d320c038STejun Heo system_long_wq = alloc_workqueue("events_long", 0, 0); 6705f3421797STejun Heo system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 6706636b927eSTejun Heo WQ_MAX_ACTIVE); 670724d51addSTejun Heo system_freezable_wq = alloc_workqueue("events_freezable", 670824d51addSTejun Heo WQ_FREEZABLE, 0); 67090668106cSViresh Kumar system_power_efficient_wq = alloc_workqueue("events_power_efficient", 67100668106cSViresh Kumar WQ_POWER_EFFICIENT, 0); 67117bff1820SGreg Kroah-Hartman system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 67120668106cSViresh Kumar WQ_FREEZABLE | WQ_POWER_EFFICIENT, 67130668106cSViresh Kumar 0); 67141aabe902SJoonsoo Kim BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 67150668106cSViresh Kumar !system_unbound_wq || !system_freezable_wq || 67160668106cSViresh Kumar !system_power_efficient_wq || 67170668106cSViresh Kumar !system_freezable_power_efficient_wq); 67183347fa09STejun Heo } 67193347fa09STejun Heo 6720aa6fde93STejun Heo static void __init wq_cpu_intensive_thresh_init(void) 6721aa6fde93STejun Heo { 6722aa6fde93STejun Heo unsigned long thresh; 6723aa6fde93STejun Heo unsigned long bogo; 6724aa6fde93STejun Heo 6725dd64c873SZqiang pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release"); 6726dd64c873SZqiang BUG_ON(IS_ERR(pwq_release_worker)); 6727dd64c873SZqiang 6728aa6fde93STejun Heo /* if the user set it to a specific value, keep it */ 6729aa6fde93STejun Heo if (wq_cpu_intensive_thresh_us != ULONG_MAX) 6730aa6fde93STejun Heo return; 6731aa6fde93STejun Heo 6732aa6fde93STejun Heo /* 
6733aa6fde93STejun Heo * The default of 10ms is derived from the fact that most modern (as of 6734aa6fde93STejun Heo * 2023) processors can do a lot in 10ms and that it's just below what 6735aa6fde93STejun Heo * most consider human-perceivable. However, the kernel also runs on a 6736aa6fde93STejun Heo * lot slower CPUs including microcontrollers where the threshold is way 6737aa6fde93STejun Heo * too low. 6738aa6fde93STejun Heo * 6739aa6fde93STejun Heo * Let's scale up the threshold upto 1 second if BogoMips is below 4000. 6740aa6fde93STejun Heo * This is by no means accurate but it doesn't have to be. The mechanism 6741aa6fde93STejun Heo * is still useful even when the threshold is fully scaled up. Also, as 6742aa6fde93STejun Heo * the reports would usually be applicable to everyone, some machines 6743aa6fde93STejun Heo * operating on longer thresholds won't significantly diminish their 6744aa6fde93STejun Heo * usefulness. 6745aa6fde93STejun Heo */ 6746aa6fde93STejun Heo thresh = 10 * USEC_PER_MSEC; 6747aa6fde93STejun Heo 6748aa6fde93STejun Heo /* see init/calibrate.c for lpj -> BogoMIPS calculation */ 6749aa6fde93STejun Heo bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); 6750aa6fde93STejun Heo if (bogo < 4000) 6751aa6fde93STejun Heo thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); 6752aa6fde93STejun Heo 6753aa6fde93STejun Heo pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", 6754aa6fde93STejun Heo loops_per_jiffy, bogo, thresh); 6755aa6fde93STejun Heo 6756aa6fde93STejun Heo wq_cpu_intensive_thresh_us = thresh; 6757aa6fde93STejun Heo } 6758aa6fde93STejun Heo 67593347fa09STejun Heo /** 67603347fa09STejun Heo * workqueue_init - bring workqueue subsystem fully online 67613347fa09STejun Heo * 67622930155bSTejun Heo * This is the second step of three-staged workqueue subsystem initialization 67632930155bSTejun Heo * and invoked as soon as kthreads can be created and scheduled. Workqueues have 67642930155bSTejun Heo * been created and work items queued on them, but there are no kworkers 67652930155bSTejun Heo * executing the work items yet. Populate the worker pools with the initial 67662930155bSTejun Heo * workers and enable future kworker creations. 67673347fa09STejun Heo */ 67682333e829SYu Chen void __init workqueue_init(void) 67693347fa09STejun Heo { 67702186d9f9STejun Heo struct workqueue_struct *wq; 67713347fa09STejun Heo struct worker_pool *pool; 67723347fa09STejun Heo int cpu, bkt; 67733347fa09STejun Heo 6774aa6fde93STejun Heo wq_cpu_intensive_thresh_init(); 6775aa6fde93STejun Heo 67762186d9f9STejun Heo mutex_lock(&wq_pool_mutex); 67772186d9f9STejun Heo 67782930155bSTejun Heo /* 67792930155bSTejun Heo * Per-cpu pools created earlier could be missing node hint. Fix them 67802930155bSTejun Heo * up. Also, create a rescuer for workqueues that requested it. 
67812930155bSTejun Heo */
67822186d9f9STejun Heo for_each_possible_cpu(cpu) {
67832186d9f9STejun Heo for_each_cpu_worker_pool(pool, cpu) {
67842186d9f9STejun Heo pool->node = cpu_to_node(cpu);
67852186d9f9STejun Heo }
67862186d9f9STejun Heo }
67872186d9f9STejun Heo
678840c17f75STejun Heo list_for_each_entry(wq, &workqueues, list) {
678940c17f75STejun Heo WARN(init_rescuer(wq),
679040c17f75STejun Heo "workqueue: failed to create early rescuer for %s",
679140c17f75STejun Heo wq->name);
679240c17f75STejun Heo }
67932186d9f9STejun Heo
67942186d9f9STejun Heo mutex_unlock(&wq_pool_mutex);
67952186d9f9STejun Heo
67963347fa09STejun Heo /* create the initial workers */
67973347fa09STejun Heo for_each_online_cpu(cpu) {
67983347fa09STejun Heo for_each_cpu_worker_pool(pool, cpu) {
67993347fa09STejun Heo pool->flags &= ~POOL_DISASSOCIATED;
68003347fa09STejun Heo BUG_ON(!create_worker(pool));
68013347fa09STejun Heo }
68023347fa09STejun Heo }
68033347fa09STejun Heo
68043347fa09STejun Heo hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
68053347fa09STejun Heo BUG_ON(!create_worker(pool));
68063347fa09STejun Heo
68073347fa09STejun Heo wq_online = true;
680882607adcSTejun Heo wq_watchdog_init();
68091da177e4SLinus Torvalds }
6810c4f135d6STetsuo Handa
6811025e1684STejun Heo /*
6812025e1684STejun Heo * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
6813025e1684STejun Heo * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique
6814025e1684STejun Heo * and consecutive pod ID. The rest of @pt is initialized accordingly.
6815025e1684STejun Heo */
6816025e1684STejun Heo static void __init init_pod_type(struct wq_pod_type *pt,
6817025e1684STejun Heo bool (*cpus_share_pod)(int, int))
6818025e1684STejun Heo {
6819025e1684STejun Heo int cur, pre, cpu, pod;
6820025e1684STejun Heo
6821025e1684STejun Heo pt->nr_pods = 0;
6822025e1684STejun Heo
6823025e1684STejun Heo /* init @pt->cpu_pod[] according to @cpus_share_pod() */
6824025e1684STejun Heo pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6825025e1684STejun Heo BUG_ON(!pt->cpu_pod);
6826025e1684STejun Heo
6827025e1684STejun Heo for_each_possible_cpu(cur) {
6828025e1684STejun Heo for_each_possible_cpu(pre) {
6829025e1684STejun Heo if (pre >= cur) {
6830025e1684STejun Heo pt->cpu_pod[cur] = pt->nr_pods++;
6831025e1684STejun Heo break;
6832025e1684STejun Heo }
6833025e1684STejun Heo if (cpus_share_pod(cur, pre)) {
6834025e1684STejun Heo pt->cpu_pod[cur] = pt->cpu_pod[pre];
6835025e1684STejun Heo break;
6836025e1684STejun Heo }
6837025e1684STejun Heo }
6838025e1684STejun Heo }
6839025e1684STejun Heo
6840025e1684STejun Heo /* init the rest to match @pt->cpu_pod[] */
6841025e1684STejun Heo pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6842025e1684STejun Heo pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
6843025e1684STejun Heo BUG_ON(!pt->pod_cpus || !pt->pod_node);
6844025e1684STejun Heo
6845025e1684STejun Heo for (pod = 0; pod < pt->nr_pods; pod++)
6846025e1684STejun Heo BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
6847025e1684STejun Heo
6848025e1684STejun Heo for_each_possible_cpu(cpu) {
6849025e1684STejun Heo cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
6850025e1684STejun Heo pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
6851025e1684STejun Heo }
6852025e1684STejun Heo }
6853025e1684STejun Heo
685463c5484eSTejun Heo static bool __init cpus_dont_share(int cpu0, int cpu1)
685563c5484eSTejun Heo {
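	/*
	 * Used by WQ_AFFN_CPU below: by reporting that no two CPUs ever share
	 * a pod, init_pod_type() ends up assigning each possible CPU its own
	 * single-CPU pod.
	 */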
685663c5484eSTejun Heo return false;
685763c5484eSTejun Heo }
685863c5484eSTejun Heo
685963c5484eSTejun Heo static bool __init cpus_share_smt(int cpu0, int cpu1)
686063c5484eSTejun Heo {
686163c5484eSTejun Heo #ifdef CONFIG_SCHED_SMT
686263c5484eSTejun Heo return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
686363c5484eSTejun Heo #else
686463c5484eSTejun Heo return false;
686563c5484eSTejun Heo #endif
686663c5484eSTejun Heo }
686763c5484eSTejun Heo
6868025e1684STejun Heo static bool __init cpus_share_numa(int cpu0, int cpu1)
6869025e1684STejun Heo {
6870025e1684STejun Heo return cpu_to_node(cpu0) == cpu_to_node(cpu1);
6871025e1684STejun Heo }
6872025e1684STejun Heo
68732930155bSTejun Heo /**
68742930155bSTejun Heo * workqueue_init_topology - initialize CPU pods for unbound workqueues
68752930155bSTejun Heo *
68762930155bSTejun Heo * This is the third step of three-staged workqueue subsystem initialization and
68772930155bSTejun Heo * invoked after SMP and topology information are fully initialized. It
68782930155bSTejun Heo * initializes the unbound CPU pods accordingly.
68792930155bSTejun Heo */
68802930155bSTejun Heo void __init workqueue_init_topology(void)
6881a86feae6STejun Heo {
68822930155bSTejun Heo struct workqueue_struct *wq;
6883025e1684STejun Heo int cpu;
6884a86feae6STejun Heo
688563c5484eSTejun Heo init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
688663c5484eSTejun Heo init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
688763c5484eSTejun Heo init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
6888025e1684STejun Heo init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
6889a86feae6STejun Heo
68902930155bSTejun Heo mutex_lock(&wq_pool_mutex);
6891a86feae6STejun Heo
6892a86feae6STejun Heo /*
68932930155bSTejun Heo * Workqueues allocated earlier would have all CPUs sharing the default
68942930155bSTejun Heo * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
68952930155bSTejun Heo * combinations to apply per-pod sharing.
68962930155bSTejun Heo */
68972930155bSTejun Heo list_for_each_entry(wq, &workqueues, list) {
68986741dd3fSGreg Kroah-Hartman for_each_online_cpu(cpu) {
68992930155bSTejun Heo wq_update_pod(wq, cpu, cpu, true);
69002930155bSTejun Heo }
69012930155bSTejun Heo }
69022930155bSTejun Heo
69032930155bSTejun Heo mutex_unlock(&wq_pool_mutex);
6904a86feae6STejun Heo }
6905a86feae6STejun Heo
690620bdedafSTetsuo Handa void __warn_flushing_systemwide_wq(void)
690720bdedafSTetsuo Handa {
690820bdedafSTetsuo Handa pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
690920bdedafSTetsuo Handa dump_stack();
691020bdedafSTetsuo Handa }
6911c4f135d6STetsuo Handa EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
6912ace3c549Stiozhang
6913ace3c549Stiozhang static int __init workqueue_unbound_cpus_setup(char *str)
6914ace3c549Stiozhang {
6915ace3c549Stiozhang if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
6916ace3c549Stiozhang cpumask_clear(&wq_cmdline_cpumask);
6917ace3c549Stiozhang pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
6918ace3c549Stiozhang }
6919ace3c549Stiozhang
6920ace3c549Stiozhang return 1;
6921ace3c549Stiozhang }
6922ace3c549Stiozhang __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
6923
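/*
 * Worked example of init_pod_type(), assuming a hypothetical machine with
 * four possible CPUs where CPUs 0-1 sit on NUMA node 0 and CPUs 2-3 on
 * node 1 (illustrative only, not derived from any real topology):
 *
 *   cpus_share_numa -> nr_pods = 2, cpu_pod = {0, 0, 1, 1},
 *                      pod_cpus = {0-1, 2-3}, pod_node = {0, 1}
 *   cpus_dont_share -> nr_pods = 4, cpu_pod = {0, 1, 2, 3},
 *                      i.e. one single-CPU pod per possible CPU
 *
 * workqueue_init_topology() then walks every existing workqueue and calls
 * wq_update_pod() for each online CPU so that unbound workqueues pick up
 * the per-pod sharing computed above.
 */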
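/*
 * Example boot-time usage of the knobs defined in this file (values are
 * illustrative; names follow the definitions above plus the usual
 * "workqueue." prefix that built-in module parameters receive):
 *
 *   workqueue.unbound_cpus=0-3     restrict unbound workqueues to CPUs 0-3,
 *                                  parsed by workqueue_unbound_cpus_setup();
 *                                  an invalid list clears the mask and falls
 *                                  back to the default
 *   workqueue.watchdog_thresh=60   report worker pools stalled for 60s;
 *                                  0 disables the watchdog
 *                                  (CONFIG_WQ_WATCHDOG only)
 *
 * For the CPU-intensive auto-detection threshold, wq_cpu_intensive_thresh_init()
 * keeps the 10ms default on reasonably fast CPUs; on a hypothetical machine
 * reporting ~1000 BogoMIPS it would scale it to
 * min(10000us * 4000 / 1000, 1000000us) = 40000us, i.e. 40ms.
 */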