// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>

#include "workqueue_internal.h"
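
/*
 * Editor's illustrative sketch (not part of the implementation; the
 * wq_example_* names are hypothetical): the basic usage pattern of the API
 * implemented below.  A work item is declared once, queued from almost any
 * context, and later executed in process context by a worker from one of
 * the shared pools described in the header comment.
 */
static void wq_example_fn(struct work_struct *work)
{
	pr_info("workqueue example: running in process context\n");
}

static DECLARE_WORK(wq_example_work, wq_example_fn);

static void __maybe_unused wq_example_submit(void)
{
	/* queue on system_wq; runs on the local CPU's per-cpu pool */
	schedule_work(&wq_example_work);

	/* wait for the queued execution to finish */
	flush_work(&wq_example_work);
}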

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 32,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * K: Only modified by worker while holding pool->lock.  Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * WO: wq->mutex protected for writes.  Updated with WRITE_ONCE() and can be
 *     read with READ_ONCE() without locking.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* L: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */

	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held.  The readers grab pool->lock and are
	 * guaranteed to see if the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct	idle_cull_work;	/* L: worker idle cleanup */

	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct list_head	dying_workers;	/* A: workers about to die */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};

/*
 * Per-pool_workqueue statistics.  These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
	PWQ_STAT_STARTED,	/* work items started execution */
	PWQ_STAT_COMPLETED,	/* work items completed execution */
	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
	PWQ_STAT_REPATRIATED,	/* unbound workers brought back into scope */
	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */

	PWQ_NR_STATS,
};

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct	*wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate
	 * in pwq->nr_active and all work items in pwq->inactive_works are
	 * marked with WORK_STRUCT_INACTIVE.  But not all WORK_STRUCT_INACTIVE
	 * work items are in pwq->inactive_works.  Some of them are ready to
	 * run in pool->worklist or worker->scheduled.  Those work items are
	 * only struct wq_barrier which is used for flush_work() and should
	 * not participate in pwq->nr_active.  For a non-barrier work item, it
	 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	u64			stats[PWQ_NR_STATS];

	/*
	 * Release of unbound pwq is punted to a kthread_worker.  See put_pwq()
	 * and pwq_release_workfn() for details.  pool_workqueue itself is also
	 * RCU protected so that the first pwq can be determined without
	 * grabbing wq->mutex.
	 */
	struct kthread_work	release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);
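
/*
 * Editor's illustrative sketch (not part of the implementation; the
 * wq_example2_* names are hypothetical) of the nr_active/max_active
 * bookkeeping documented above: with max_active == 1, a second item queued
 * while the first is still in flight is parked on pwq->inactive_works and
 * marked WORK_STRUCT_INACTIVE until the first one retires.
 */
static void wq_example2_fn(struct work_struct *work) { }
static DECLARE_WORK(wq_example2_a, wq_example2_fn);
static DECLARE_WORK(wq_example2_b, wq_example2_fn);

static void __maybe_unused wq_example2_max_active(void)
{
	struct workqueue_struct *wq = alloc_workqueue("wq_example2", 0, 1);

	if (!wq)
		return;

	queue_work(wq, &wq_example2_a);	/* counted in pwq->nr_active */
	queue_work(wq, &wq_example2_b);	/* over max_active: likely parked on inactive_works */
	flush_workqueue(wq);		/* both items have executed once this returns */
	destroy_workqueue(wq);
}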

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			max_active;	/* WO: max active works */
	int			saved_max_active; /* WQ: saved max_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
};

static struct kmem_cache *pwq_cache;

/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
	int			nr_pods;	/* number of pods */
	cpumask_var_t		*pod_cpus;	/* pod -> cpus */
	int			*pod_node;	/* pod -> node */
	int			*cpu_pod;	/* cpu -> pod */
};

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
	[WQ_AFFN_DFL]		= "default",
	[WQ_AFFN_CPU]		= "cpu",
	[WQ_AFFN_SMT]		= "smt",
	[WQ_AFFN_CACHE]		= "cache",
	[WQ_AFFN_NUMA]		= "numa",
	[WQ_AFFN_SYSTEM]	= "system",
};

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
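
/*
 * Editor's note (illustrative): since this file is built in, the module
 * parameter above is set on the kernel command line with the "workqueue."
 * prefix, e.g.
 *
 *	workqueue.cpu_intensive_thresh_us=10000
 *
 * which would raise the auto-detection threshold to 10ms (assuming the
 * standard built-in module parameter naming).
 */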

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_pod_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* to further constrain wq_unbound_cpumask by cmdline parameter */
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

/*
 * I: kthread_worker to release pwq's.  pwq release needs to be bounced to a
 * process context while holding a pool lock.  Bounce to a dedicated kthread
 * worker to avoid A-A deadlocks.
 */
static struct kthread_worker *pwq_release_worker;

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				lockdep_is_held(&(wq->mutex)))

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static const struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
	struct work_struct *work = addr;

	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.is_static_object = work_is_static_object,
	.fixup_init	= work_fixup_init,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
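
/*
 * Editor's illustrative sketch (not part of the implementation; the
 * wq_example3_* names are hypothetical) of the on-stack pairing the
 * debugobjects hooks above support: stack-allocated work items must be set
 * up with the _ONSTACK initializer and torn down with
 * destroy_work_on_stack() before the stack frame goes away.
 */
static void wq_example3_fn(struct work_struct *work) { }

static void __maybe_unused wq_example3_onstack(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, wq_example3_fn);
	schedule_work(&work);
	flush_work(&work);		/* must finish before leaving the frame */
	destroy_work_on_stack(&work);
}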

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(unsigned long work_data)
{
	return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's
 * data contain the pointer to the queued pwq.  Once execution starts, the
 * flag is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
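
/*
 * Editor's worked illustration of the encoding described above (layout taken
 * from include/linux/workqueue.h; exact bit positions are configuration
 * dependent):
 *
 *   queued:    work->data == (unsigned long)pwq | color flags |
 *                            WORK_STRUCT_PWQ | WORK_STRUCT_PENDING
 *   off queue: work->data == (pool_id << WORK_OFFQ_POOL_SHIFT) | OFFQ flags
 *
 * so get_work_pwq() only yields a pwq while WORK_STRUCT_PWQ is set, and
 * get_work_pool_id() recovers the last pool ID otherwise.
 */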

static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards.  This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work.  E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8                                 LOAD event_indicated
	 *                                 }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1.  If that happens,
	 * CPU#0 observes the PENDING bit is still set and new execution of
	 * a @work is not queued in the hope that CPU#1 will eventually
	 * finish the queued @work.  Meanwhile CPU#1 does not see
	 * event_indicated is set, because speculative LOAD was executed
	 * before actual STORE.
	 */
	smp_mb();
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
{
	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && !pool->nr_running;
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
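
/*
 * Editor's worked example of the ratio above (illustrative): with
 * MAX_IDLE_WORKERS_RATIO == 4, a pool with nr_idle == 6 and nr_busy == 15
 * is considered over-provisioned since (6 - 2) * 4 == 16 >= 15, while the
 * same pool with nr_busy == 17 is not (16 < 17).  The "- 2" keeps a small
 * reserve of idle workers around at all times.
 */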
*/ 85463d95a91STejun Heo static bool too_many_workers(struct worker_pool *pool) 855e22bee78STejun Heo { 856692b4825STejun Heo bool managing = pool->flags & POOL_MANAGER_ACTIVE; 85763d95a91STejun Heo int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 85863d95a91STejun Heo int nr_busy = pool->nr_workers - nr_idle; 859e22bee78STejun Heo 860e22bee78STejun Heo return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 861e22bee78STejun Heo } 862e22bee78STejun Heo 8634690c4abSTejun Heo /** 864e22bee78STejun Heo * worker_set_flags - set worker flags and adjust nr_running accordingly 865cb444766STejun Heo * @worker: self 866d302f017STejun Heo * @flags: flags to set 867d302f017STejun Heo * 868228f1d00SLai Jiangshan * Set @flags in @worker->flags and adjust nr_running accordingly. 869d302f017STejun Heo */ 870228f1d00SLai Jiangshan static inline void worker_set_flags(struct worker *worker, unsigned int flags) 871d302f017STejun Heo { 872bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 873e22bee78STejun Heo 874bc8b50c2STejun Heo lockdep_assert_held(&pool->lock); 875cb444766STejun Heo 876228f1d00SLai Jiangshan /* If transitioning into NOT_RUNNING, adjust nr_running. */ 877e22bee78STejun Heo if ((flags & WORKER_NOT_RUNNING) && 878e22bee78STejun Heo !(worker->flags & WORKER_NOT_RUNNING)) { 879bc35f7efSLai Jiangshan pool->nr_running--; 880e22bee78STejun Heo } 881e22bee78STejun Heo 882d302f017STejun Heo worker->flags |= flags; 883d302f017STejun Heo } 884d302f017STejun Heo 885d302f017STejun Heo /** 886e22bee78STejun Heo * worker_clr_flags - clear worker flags and adjust nr_running accordingly 887cb444766STejun Heo * @worker: self 888d302f017STejun Heo * @flags: flags to clear 889d302f017STejun Heo * 890e22bee78STejun Heo * Clear @flags in @worker->flags and adjust nr_running accordingly. 891d302f017STejun Heo */ 892d302f017STejun Heo static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 893d302f017STejun Heo { 89463d95a91STejun Heo struct worker_pool *pool = worker->pool; 895e22bee78STejun Heo unsigned int oflags = worker->flags; 896e22bee78STejun Heo 897bc8b50c2STejun Heo lockdep_assert_held(&pool->lock); 898cb444766STejun Heo 899d302f017STejun Heo worker->flags &= ~flags; 900e22bee78STejun Heo 90142c025f3STejun Heo /* 90242c025f3STejun Heo * If transitioning out of NOT_RUNNING, increment nr_running. Note 90342c025f3STejun Heo * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 90442c025f3STejun Heo * of multiple flags, not a single flag. 90542c025f3STejun Heo */ 906e22bee78STejun Heo if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 907e22bee78STejun Heo if (!(worker->flags & WORKER_NOT_RUNNING)) 908bc35f7efSLai Jiangshan pool->nr_running++; 909d302f017STejun Heo } 910d302f017STejun Heo 911797e8345STejun Heo /* Return the first idle worker. Called with pool->lock held. */ 912797e8345STejun Heo static struct worker *first_idle_worker(struct worker_pool *pool) 913797e8345STejun Heo { 914797e8345STejun Heo if (unlikely(list_empty(&pool->idle_list))) 915797e8345STejun Heo return NULL; 916797e8345STejun Heo 917797e8345STejun Heo return list_first_entry(&pool->idle_list, struct worker, entry); 918797e8345STejun Heo } 919797e8345STejun Heo 920797e8345STejun Heo /** 921797e8345STejun Heo * worker_enter_idle - enter idle state 922797e8345STejun Heo * @worker: worker which is entering idle state 923797e8345STejun Heo * 924797e8345STejun Heo * @worker is entering idle state. 
Update stats and idle timer if 925797e8345STejun Heo * necessary. 926797e8345STejun Heo * 927797e8345STejun Heo * LOCKING: 928797e8345STejun Heo * raw_spin_lock_irq(pool->lock). 929797e8345STejun Heo */ 930797e8345STejun Heo static void worker_enter_idle(struct worker *worker) 931797e8345STejun Heo { 932797e8345STejun Heo struct worker_pool *pool = worker->pool; 933797e8345STejun Heo 934797e8345STejun Heo if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 935797e8345STejun Heo WARN_ON_ONCE(!list_empty(&worker->entry) && 936797e8345STejun Heo (worker->hentry.next || worker->hentry.pprev))) 937797e8345STejun Heo return; 938797e8345STejun Heo 939797e8345STejun Heo /* can't use worker_set_flags(), also called from create_worker() */ 940797e8345STejun Heo worker->flags |= WORKER_IDLE; 941797e8345STejun Heo pool->nr_idle++; 942797e8345STejun Heo worker->last_active = jiffies; 943797e8345STejun Heo 944797e8345STejun Heo /* idle_list is LIFO */ 945797e8345STejun Heo list_add(&worker->entry, &pool->idle_list); 946797e8345STejun Heo 947797e8345STejun Heo if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 948797e8345STejun Heo mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 949797e8345STejun Heo 950797e8345STejun Heo /* Sanity check nr_running. */ 951797e8345STejun Heo WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running); 952797e8345STejun Heo } 953797e8345STejun Heo 954797e8345STejun Heo /** 955797e8345STejun Heo * worker_leave_idle - leave idle state 956797e8345STejun Heo * @worker: worker which is leaving idle state 957797e8345STejun Heo * 958797e8345STejun Heo * @worker is leaving idle state. Update stats. 959797e8345STejun Heo * 960797e8345STejun Heo * LOCKING: 961797e8345STejun Heo * raw_spin_lock_irq(pool->lock). 962797e8345STejun Heo */ 963797e8345STejun Heo static void worker_leave_idle(struct worker *worker) 964797e8345STejun Heo { 965797e8345STejun Heo struct worker_pool *pool = worker->pool; 966797e8345STejun Heo 967797e8345STejun Heo if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 968797e8345STejun Heo return; 969797e8345STejun Heo worker_clr_flags(worker, WORKER_IDLE); 970797e8345STejun Heo pool->nr_idle--; 971797e8345STejun Heo list_del_init(&worker->entry); 972797e8345STejun Heo } 973797e8345STejun Heo 974797e8345STejun Heo /** 975797e8345STejun Heo * find_worker_executing_work - find worker which is executing a work 976797e8345STejun Heo * @pool: pool of interest 977797e8345STejun Heo * @work: work to find worker for 978797e8345STejun Heo * 979797e8345STejun Heo * Find a worker which is executing @work on @pool by searching 980797e8345STejun Heo * @pool->busy_hash which is keyed by the address of @work. For a worker 981797e8345STejun Heo * to match, its current execution should match the address of @work and 982797e8345STejun Heo * its work function. This is to avoid unwanted dependency between 983797e8345STejun Heo * unrelated work executions through a work item being recycled while still 984797e8345STejun Heo * being executed. 985797e8345STejun Heo * 986797e8345STejun Heo * This is a bit tricky. A work item may be freed once its execution 987797e8345STejun Heo * starts and nothing prevents the freed area from being recycled for 988797e8345STejun Heo * another work item. 
If the same work item address ends up being reused 989797e8345STejun Heo * before the original execution finishes, workqueue will identify the 990797e8345STejun Heo * recycled work item as currently executing and make it wait until the 991797e8345STejun Heo * current execution finishes, introducing an unwanted dependency. 992797e8345STejun Heo * 993797e8345STejun Heo * This function checks the work item address and work function to avoid 994797e8345STejun Heo * false positives. Note that this isn't complete as one may construct a 995797e8345STejun Heo * work function which can introduce dependency onto itself through a 996797e8345STejun Heo * recycled work item. Well, if somebody wants to shoot oneself in the 997797e8345STejun Heo * foot that badly, there's only so much we can do, and if such deadlock 998797e8345STejun Heo * actually occurs, it should be easy to locate the culprit work function. 999797e8345STejun Heo * 1000797e8345STejun Heo * CONTEXT: 1001797e8345STejun Heo * raw_spin_lock_irq(pool->lock). 1002797e8345STejun Heo * 1003797e8345STejun Heo * Return: 1004797e8345STejun Heo * Pointer to worker which is executing @work if found, %NULL 1005797e8345STejun Heo * otherwise. 1006797e8345STejun Heo */ 1007797e8345STejun Heo static struct worker *find_worker_executing_work(struct worker_pool *pool, 1008797e8345STejun Heo struct work_struct *work) 1009797e8345STejun Heo { 1010797e8345STejun Heo struct worker *worker; 1011797e8345STejun Heo 1012797e8345STejun Heo hash_for_each_possible(pool->busy_hash, worker, hentry, 1013797e8345STejun Heo (unsigned long)work) 1014797e8345STejun Heo if (worker->current_work == work && 1015797e8345STejun Heo worker->current_func == work->func) 1016797e8345STejun Heo return worker; 1017797e8345STejun Heo 1018797e8345STejun Heo return NULL; 1019797e8345STejun Heo } 1020797e8345STejun Heo 1021797e8345STejun Heo /** 1022797e8345STejun Heo * move_linked_works - move linked works to a list 1023797e8345STejun Heo * @work: start of series of works to be scheduled 1024797e8345STejun Heo * @head: target list to append @work to 1025797e8345STejun Heo * @nextp: out parameter for nested worklist walking 1026797e8345STejun Heo * 1027873eaca6STejun Heo * Schedule linked works starting from @work to @head. Work series to be 1028873eaca6STejun Heo * scheduled starts at @work and includes any consecutive work with 1029873eaca6STejun Heo * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on 1030873eaca6STejun Heo * @nextp. 1031797e8345STejun Heo * 1032797e8345STejun Heo * CONTEXT: 1033797e8345STejun Heo * raw_spin_lock_irq(pool->lock). 1034797e8345STejun Heo */ 1035797e8345STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head, 1036797e8345STejun Heo struct work_struct **nextp) 1037797e8345STejun Heo { 1038797e8345STejun Heo struct work_struct *n; 1039797e8345STejun Heo 1040797e8345STejun Heo /* 1041797e8345STejun Heo * Linked worklist will always end before the end of the list, 1042797e8345STejun Heo * use NULL for list head. 
1043797e8345STejun Heo */ 1044797e8345STejun Heo list_for_each_entry_safe_from(work, n, NULL, entry) { 1045797e8345STejun Heo list_move_tail(&work->entry, head); 1046797e8345STejun Heo if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 1047797e8345STejun Heo break; 1048797e8345STejun Heo } 1049797e8345STejun Heo 1050797e8345STejun Heo /* 1051797e8345STejun Heo * If we're already inside safe list traversal and have moved 1052797e8345STejun Heo * multiple works to the scheduled queue, the next position 1053797e8345STejun Heo * needs to be updated. 1054797e8345STejun Heo */ 1055797e8345STejun Heo if (nextp) 1056797e8345STejun Heo *nextp = n; 1057797e8345STejun Heo } 1058797e8345STejun Heo 1059797e8345STejun Heo /** 1060873eaca6STejun Heo * assign_work - assign a work item and its linked work items to a worker 1061873eaca6STejun Heo * @work: work to assign 1062873eaca6STejun Heo * @worker: worker to assign to 1063873eaca6STejun Heo * @nextp: out parameter for nested worklist walking 1064873eaca6STejun Heo * 1065873eaca6STejun Heo * Assign @work and its linked work items to @worker. If @work is already being 1066873eaca6STejun Heo * executed by another worker in the same pool, it'll be punted there. 1067873eaca6STejun Heo * 1068873eaca6STejun Heo * If @nextp is not NULL, it's updated to point to the next work of the last 1069873eaca6STejun Heo * scheduled work. This allows assign_work() to be nested inside 1070873eaca6STejun Heo * list_for_each_entry_safe(). 1071873eaca6STejun Heo * 1072873eaca6STejun Heo * Returns %true if @work was successfully assigned to @worker. %false if @work 1073873eaca6STejun Heo * was punted to another worker already executing it. 1074873eaca6STejun Heo */ 1075873eaca6STejun Heo static bool assign_work(struct work_struct *work, struct worker *worker, 1076873eaca6STejun Heo struct work_struct **nextp) 1077873eaca6STejun Heo { 1078873eaca6STejun Heo struct worker_pool *pool = worker->pool; 1079873eaca6STejun Heo struct worker *collision; 1080873eaca6STejun Heo 1081873eaca6STejun Heo lockdep_assert_held(&pool->lock); 1082873eaca6STejun Heo 1083873eaca6STejun Heo /* 1084873eaca6STejun Heo * A single work shouldn't be executed concurrently by multiple workers. 1085873eaca6STejun Heo * __queue_work() ensures that @work doesn't jump to a different pool 1086873eaca6STejun Heo * while still running in the previous pool. Here, we should ensure that 1087873eaca6STejun Heo * @work is not executed concurrently by multiple workers from the same 1088873eaca6STejun Heo * pool. Check whether anyone is already processing the work. If so, 1089873eaca6STejun Heo * defer the work to the currently executing one. 1090873eaca6STejun Heo */ 1091873eaca6STejun Heo collision = find_worker_executing_work(pool, work); 1092873eaca6STejun Heo if (unlikely(collision)) { 1093873eaca6STejun Heo move_linked_works(work, &collision->scheduled, nextp); 1094873eaca6STejun Heo return false; 1095873eaca6STejun Heo } 1096873eaca6STejun Heo 1097873eaca6STejun Heo move_linked_works(work, &worker->scheduled, nextp); 1098873eaca6STejun Heo return true; 1099873eaca6STejun Heo } 1100873eaca6STejun Heo 1101873eaca6STejun Heo /** 11020219a352STejun Heo * kick_pool - wake up an idle worker if necessary 11030219a352STejun Heo * @pool: pool to kick 1104797e8345STejun Heo * 11050219a352STejun Heo * @pool may have pending work items. Wake up worker if necessary. Returns 11060219a352STejun Heo * whether a worker was woken up. 
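 *
 * For example, __queue_work() calls this right after putting a new work item
 * on @pool->worklist, and wq_worker_sleeping() uses it to hand work off when
 * a running worker blocks and stops counting towards @pool->nr_running.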
1107797e8345STejun Heo */ 11080219a352STejun Heo static bool kick_pool(struct worker_pool *pool) 1109797e8345STejun Heo { 1110797e8345STejun Heo struct worker *worker = first_idle_worker(pool); 11118639ecebSTejun Heo struct task_struct *p; 1112797e8345STejun Heo 11130219a352STejun Heo lockdep_assert_held(&pool->lock); 11140219a352STejun Heo 11150219a352STejun Heo if (!need_more_worker(pool) || !worker) 11160219a352STejun Heo return false; 11170219a352STejun Heo 11188639ecebSTejun Heo p = worker->task; 11198639ecebSTejun Heo 11208639ecebSTejun Heo #ifdef CONFIG_SMP 11218639ecebSTejun Heo /* 11228639ecebSTejun Heo * Idle @worker is about to execute @work and waking up provides an 11238639ecebSTejun Heo * opportunity to migrate @worker at a lower cost by setting the task's 11248639ecebSTejun Heo * wake_cpu field. Let's see if we want to move @worker to improve 11258639ecebSTejun Heo * execution locality. 11268639ecebSTejun Heo * 11278639ecebSTejun Heo * We're waking the worker that went idle the latest and there's some 11288639ecebSTejun Heo * chance that @worker is marked idle but hasn't gone off CPU yet. If 11298639ecebSTejun Heo * so, setting the wake_cpu won't do anything. As this is a best-effort 11308639ecebSTejun Heo * optimization and the race window is narrow, let's leave as-is for 11318639ecebSTejun Heo * now. If this becomes pronounced, we can skip over workers which are 11328639ecebSTejun Heo * still on cpu when picking an idle worker. 11338639ecebSTejun Heo * 11348639ecebSTejun Heo * If @pool has non-strict affinity, @worker might have ended up outside 11358639ecebSTejun Heo * its affinity scope. Repatriate. 11368639ecebSTejun Heo */ 11378639ecebSTejun Heo if (!pool->attrs->affn_strict && 11388639ecebSTejun Heo !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { 11398639ecebSTejun Heo struct work_struct *work = list_first_entry(&pool->worklist, 11408639ecebSTejun Heo struct work_struct, entry); 11418639ecebSTejun Heo p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask); 11428639ecebSTejun Heo get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; 11438639ecebSTejun Heo } 11448639ecebSTejun Heo #endif 11458639ecebSTejun Heo wake_up_process(p); 11460219a352STejun Heo return true; 1147797e8345STejun Heo } 1148797e8345STejun Heo 114963638450STejun Heo #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT 115063638450STejun Heo 115163638450STejun Heo /* 115263638450STejun Heo * Concurrency-managed per-cpu work items that hog CPU for longer than 115363638450STejun Heo * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism, 115463638450STejun Heo * which prevents them from stalling other concurrency-managed work items. If a 115563638450STejun Heo * work function keeps triggering this mechanism, it's likely that the work item 115663638450STejun Heo * should be using an unbound workqueue instead. 115763638450STejun Heo * 115863638450STejun Heo * wq_cpu_intensive_report() tracks work functions which trigger such conditions 115963638450STejun Heo * and report them so that they can be examined and converted to use unbound 116063638450STejun Heo * workqueues as appropriate. To avoid flooding the console, each violating work 116163638450STejun Heo * function is tracked and reported with exponential backoff. 
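 *
 * For example, with the is_power_of_2() check in wq_cpu_intensive_report()
 * below, a work function which keeps exceeding the threshold is reported on
 * its 4th, 8th, 16th, 32nd, ... violation rather than on every occurrence.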
116263638450STejun Heo */ 116363638450STejun Heo #define WCI_MAX_ENTS 128 116463638450STejun Heo 116563638450STejun Heo struct wci_ent { 116663638450STejun Heo work_func_t func; 116763638450STejun Heo atomic64_t cnt; 116863638450STejun Heo struct hlist_node hash_node; 116963638450STejun Heo }; 117063638450STejun Heo 117163638450STejun Heo static struct wci_ent wci_ents[WCI_MAX_ENTS]; 117263638450STejun Heo static int wci_nr_ents; 117363638450STejun Heo static DEFINE_RAW_SPINLOCK(wci_lock); 117463638450STejun Heo static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS)); 117563638450STejun Heo 117663638450STejun Heo static struct wci_ent *wci_find_ent(work_func_t func) 117763638450STejun Heo { 117863638450STejun Heo struct wci_ent *ent; 117963638450STejun Heo 118063638450STejun Heo hash_for_each_possible_rcu(wci_hash, ent, hash_node, 118163638450STejun Heo (unsigned long)func) { 118263638450STejun Heo if (ent->func == func) 118363638450STejun Heo return ent; 118463638450STejun Heo } 118563638450STejun Heo return NULL; 118663638450STejun Heo } 118763638450STejun Heo 118863638450STejun Heo static void wq_cpu_intensive_report(work_func_t func) 118963638450STejun Heo { 119063638450STejun Heo struct wci_ent *ent; 119163638450STejun Heo 119263638450STejun Heo restart: 119363638450STejun Heo ent = wci_find_ent(func); 119463638450STejun Heo if (ent) { 119563638450STejun Heo u64 cnt; 119663638450STejun Heo 119763638450STejun Heo /* 119863638450STejun Heo * Start reporting from the fourth time and back off 119963638450STejun Heo * exponentially. 120063638450STejun Heo */ 120163638450STejun Heo cnt = atomic64_inc_return_relaxed(&ent->cnt); 120263638450STejun Heo if (cnt >= 4 && is_power_of_2(cnt)) 120363638450STejun Heo printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n", 120463638450STejun Heo ent->func, wq_cpu_intensive_thresh_us, 120563638450STejun Heo atomic64_read(&ent->cnt)); 120663638450STejun Heo return; 120763638450STejun Heo } 120863638450STejun Heo 120963638450STejun Heo /* 121063638450STejun Heo * @func is a new violation. Allocate a new entry for it. If wci_ents[] 121163638450STejun Heo * is exhausted, something went really wrong and we probably made enough 121263638450STejun Heo * noise already.
121363638450STejun Heo */ 121463638450STejun Heo if (wci_nr_ents >= WCI_MAX_ENTS) 121563638450STejun Heo return; 121663638450STejun Heo 121763638450STejun Heo raw_spin_lock(&wci_lock); 121863638450STejun Heo 121963638450STejun Heo if (wci_nr_ents >= WCI_MAX_ENTS) { 122063638450STejun Heo raw_spin_unlock(&wci_lock); 122163638450STejun Heo return; 122263638450STejun Heo } 122363638450STejun Heo 122463638450STejun Heo if (wci_find_ent(func)) { 122563638450STejun Heo raw_spin_unlock(&wci_lock); 122663638450STejun Heo goto restart; 122763638450STejun Heo } 122863638450STejun Heo 122963638450STejun Heo ent = &wci_ents[wci_nr_ents++]; 123063638450STejun Heo ent->func = func; 123163638450STejun Heo atomic64_set(&ent->cnt, 1); 123263638450STejun Heo hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func); 123363638450STejun Heo 123463638450STejun Heo raw_spin_unlock(&wci_lock); 123563638450STejun Heo } 123663638450STejun Heo 123763638450STejun Heo #else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 123863638450STejun Heo static void wq_cpu_intensive_report(work_func_t func) {} 123963638450STejun Heo #endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 124063638450STejun Heo 1241c54d5046STejun Heo /** 12421da177e4SLinus Torvalds * wq_worker_running - a worker is running again 12431da177e4SLinus Torvalds * @task: task waking up 12441da177e4SLinus Torvalds * 12451da177e4SLinus Torvalds * This function is called when a worker returns from schedule() 12461da177e4SLinus Torvalds */ 12471da177e4SLinus Torvalds void wq_worker_running(struct task_struct *task) 12481da177e4SLinus Torvalds { 12491da177e4SLinus Torvalds struct worker *worker = kthread_data(task); 12501da177e4SLinus Torvalds 1251c8f6219bSZqiang if (!READ_ONCE(worker->sleeping)) 12521da177e4SLinus Torvalds return; 12531da177e4SLinus Torvalds 12541da177e4SLinus Torvalds /* 12551da177e4SLinus Torvalds * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check 12561da177e4SLinus Torvalds * and the nr_running increment below, we may ruin the nr_running reset 12571da177e4SLinus Torvalds * and leave with an unexpected pool->nr_running == 1 on the newly unbound 12581da177e4SLinus Torvalds * pool. Protect against such race. 12591da177e4SLinus Torvalds */ 12601da177e4SLinus Torvalds preempt_disable(); 12611da177e4SLinus Torvalds if (!(worker->flags & WORKER_NOT_RUNNING)) 12621da177e4SLinus Torvalds worker->pool->nr_running++; 12631da177e4SLinus Torvalds preempt_enable(); 1264616db877STejun Heo 1265616db877STejun Heo /* 1266616db877STejun Heo * CPU intensive auto-detection cares about how long a work item hogged 1267616db877STejun Heo * CPU without sleeping. Reset the starting timestamp on wakeup. 1268616db877STejun Heo */ 1269616db877STejun Heo worker->current_at = worker->task->se.sum_exec_runtime; 1270616db877STejun Heo 1271c8f6219bSZqiang WRITE_ONCE(worker->sleeping, 0); 12721da177e4SLinus Torvalds } 12731da177e4SLinus Torvalds 12741da177e4SLinus Torvalds /** 12751da177e4SLinus Torvalds * wq_worker_sleeping - a worker is going to sleep 12761da177e4SLinus Torvalds * @task: task going to sleep 12771da177e4SLinus Torvalds * 12781da177e4SLinus Torvalds * This function is called from schedule() when a busy worker is 12791da177e4SLinus Torvalds * going to sleep. 
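 *
 * This is the counterpart of wq_worker_running() above, which runs when the
 * worker wakes back up and starts counting towards nr_running again.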
12801da177e4SLinus Torvalds */ 12811da177e4SLinus Torvalds void wq_worker_sleeping(struct task_struct *task) 12821da177e4SLinus Torvalds { 12831da177e4SLinus Torvalds struct worker *worker = kthread_data(task); 12841da177e4SLinus Torvalds struct worker_pool *pool; 12851da177e4SLinus Torvalds 12861da177e4SLinus Torvalds /* 12871da177e4SLinus Torvalds * Rescuers, which may not have all the fields set up like normal 12881da177e4SLinus Torvalds * workers, also reach here, let's not access anything before 12891da177e4SLinus Torvalds * checking NOT_RUNNING. 12901da177e4SLinus Torvalds */ 12911da177e4SLinus Torvalds if (worker->flags & WORKER_NOT_RUNNING) 12921da177e4SLinus Torvalds return; 12931da177e4SLinus Torvalds 12941da177e4SLinus Torvalds pool = worker->pool; 12951da177e4SLinus Torvalds 12961da177e4SLinus Torvalds /* Return if preempted before wq_worker_running() was reached */ 1297c8f6219bSZqiang if (READ_ONCE(worker->sleeping)) 12981da177e4SLinus Torvalds return; 12991da177e4SLinus Torvalds 1300c8f6219bSZqiang WRITE_ONCE(worker->sleeping, 1); 13011da177e4SLinus Torvalds raw_spin_lock_irq(&pool->lock); 13021da177e4SLinus Torvalds 13031da177e4SLinus Torvalds /* 13041da177e4SLinus Torvalds * Recheck in case unbind_workers() preempted us. We don't 13051da177e4SLinus Torvalds * want to decrement nr_running after the worker is unbound 13061da177e4SLinus Torvalds * and nr_running has been reset. 13071da177e4SLinus Torvalds */ 13081da177e4SLinus Torvalds if (worker->flags & WORKER_NOT_RUNNING) { 13091da177e4SLinus Torvalds raw_spin_unlock_irq(&pool->lock); 13101da177e4SLinus Torvalds return; 13111da177e4SLinus Torvalds } 13121da177e4SLinus Torvalds 13131da177e4SLinus Torvalds pool->nr_running--; 13140219a352STejun Heo if (kick_pool(pool)) 1315725e8ec5STejun Heo worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++; 13160219a352STejun Heo 13171da177e4SLinus Torvalds raw_spin_unlock_irq(&pool->lock); 13181da177e4SLinus Torvalds } 13191da177e4SLinus Torvalds 13201da177e4SLinus Torvalds /** 1321616db877STejun Heo * wq_worker_tick - a scheduler tick occurred while a kworker is running 1322616db877STejun Heo * @task: task currently running 1323616db877STejun Heo * 1324616db877STejun Heo * Called from scheduler_tick(). We're in the IRQ context and the current 1325616db877STejun Heo * worker's fields which follow the 'K' locking rule can be accessed safely. 1326616db877STejun Heo */ 1327616db877STejun Heo void wq_worker_tick(struct task_struct *task) 1328616db877STejun Heo { 1329616db877STejun Heo struct worker *worker = kthread_data(task); 1330616db877STejun Heo struct pool_workqueue *pwq = worker->current_pwq; 1331616db877STejun Heo struct worker_pool *pool = worker->pool; 1332616db877STejun Heo 1333616db877STejun Heo if (!pwq) 1334616db877STejun Heo return; 1335616db877STejun Heo 13368a1dd1e5STejun Heo pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC; 13378a1dd1e5STejun Heo 133818c8ae81SZqiang if (!wq_cpu_intensive_thresh_us) 133918c8ae81SZqiang return; 134018c8ae81SZqiang 1341616db877STejun Heo /* 1342616db877STejun Heo * If the current worker is concurrency managed and hogged the CPU for 1343616db877STejun Heo * longer than wq_cpu_intensive_thresh_us, it's automatically marked 1344616db877STejun Heo * CPU_INTENSIVE to avoid stalling other concurrency-managed work items. 1345c8f6219bSZqiang * 1346c8f6219bSZqiang * Set @worker->sleeping means that @worker is in the process of 1347c8f6219bSZqiang * switching out voluntarily and won't be contributing to 1348c8f6219bSZqiang * @pool->nr_running until it wakes up. 
As wq_worker_sleeping() also 1349c8f6219bSZqiang * decrements ->nr_running, setting CPU_INTENSIVE here can lead to 1350c8f6219bSZqiang * double decrements. The task is releasing the CPU anyway. Let's skip. 1351c8f6219bSZqiang * We probably want to make this prettier in the future. 1352616db877STejun Heo */ 1353c8f6219bSZqiang if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || 1354616db877STejun Heo worker->task->se.sum_exec_runtime - worker->current_at < 1355616db877STejun Heo wq_cpu_intensive_thresh_us * NSEC_PER_USEC) 1356616db877STejun Heo return; 1357616db877STejun Heo 1358616db877STejun Heo raw_spin_lock(&pool->lock); 1359616db877STejun Heo 1360616db877STejun Heo worker_set_flags(worker, WORKER_CPU_INTENSIVE); 136163638450STejun Heo wq_cpu_intensive_report(worker->current_func); 1362616db877STejun Heo pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; 1363616db877STejun Heo 13640219a352STejun Heo if (kick_pool(pool)) 1365616db877STejun Heo pwq->stats[PWQ_STAT_CM_WAKEUP]++; 1366616db877STejun Heo 1367616db877STejun Heo raw_spin_unlock(&pool->lock); 1368616db877STejun Heo } 1369616db877STejun Heo 1370616db877STejun Heo /** 13711da177e4SLinus Torvalds * wq_worker_last_func - retrieve worker's last work function 13721da177e4SLinus Torvalds * @task: Task to retrieve last work function of. 13731da177e4SLinus Torvalds * 13741da177e4SLinus Torvalds * Determine the last function a worker executed. This is called from 13751da177e4SLinus Torvalds * the scheduler to get a worker's last known identity. 13761da177e4SLinus Torvalds * 13771da177e4SLinus Torvalds * CONTEXT: 13789b41ea72SAndrew Morton * raw_spin_lock_irq(rq->lock) 13791da177e4SLinus Torvalds * 13801da177e4SLinus Torvalds * This function is called during schedule() when a kworker is going 1381f756d5e2SNathan Lynch * to sleep. It's used by psi to identify aggregation workers during 1382f756d5e2SNathan Lynch * dequeuing, to allow periodic aggregation to shut-off when that 13831da177e4SLinus Torvalds * worker is the last task in the system or cgroup to go to sleep. 13841da177e4SLinus Torvalds * 13851da177e4SLinus Torvalds * As this function doesn't involve any workqueue-related locking, it 13861da177e4SLinus Torvalds * only returns stable values when called from inside the scheduler's 13871da177e4SLinus Torvalds * queuing and dequeuing paths, when @task, which must be a kworker, 13881da177e4SLinus Torvalds * is guaranteed to not be processing any works. 1389365970a1SDavid Howells * 1390365970a1SDavid Howells * Return: 1391365970a1SDavid Howells * The last work function %current executed as a worker, NULL if it 1392365970a1SDavid Howells * hasn't executed any work yet. 1393365970a1SDavid Howells */ 1394365970a1SDavid Howells work_func_t wq_worker_last_func(struct task_struct *task) 1395365970a1SDavid Howells { 1396365970a1SDavid Howells struct worker *worker = kthread_data(task); 1397365970a1SDavid Howells 1398365970a1SDavid Howells return worker->last_func; 1399365970a1SDavid Howells } 1400365970a1SDavid Howells 1401d302f017STejun Heo /** 14028864b4e5STejun Heo * get_pwq - get an extra reference on the specified pool_workqueue 14038864b4e5STejun Heo * @pwq: pool_workqueue to get 14048864b4e5STejun Heo * 14058864b4e5STejun Heo * Obtain an extra reference on @pwq. The caller should guarantee that 14068864b4e5STejun Heo * @pwq has positive refcnt and be holding the matching pool->lock. 
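 *
 * For example, insert_work() below takes a reference for every work item it
 * adds to a pwq, and pwq_dec_nr_in_flight() drops it again via put_pwq()
 * once the work item leaves the queue.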
14078864b4e5STejun Heo */ 14088864b4e5STejun Heo static void get_pwq(struct pool_workqueue *pwq) 14098864b4e5STejun Heo { 14108864b4e5STejun Heo lockdep_assert_held(&pwq->pool->lock); 14118864b4e5STejun Heo WARN_ON_ONCE(pwq->refcnt <= 0); 14128864b4e5STejun Heo pwq->refcnt++; 14138864b4e5STejun Heo } 14148864b4e5STejun Heo 14158864b4e5STejun Heo /** 14168864b4e5STejun Heo * put_pwq - put a pool_workqueue reference 14178864b4e5STejun Heo * @pwq: pool_workqueue to put 14188864b4e5STejun Heo * 14198864b4e5STejun Heo * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 14208864b4e5STejun Heo * destruction. The caller should be holding the matching pool->lock. 14218864b4e5STejun Heo */ 14228864b4e5STejun Heo static void put_pwq(struct pool_workqueue *pwq) 14238864b4e5STejun Heo { 14248864b4e5STejun Heo lockdep_assert_held(&pwq->pool->lock); 14258864b4e5STejun Heo if (likely(--pwq->refcnt)) 14268864b4e5STejun Heo return; 14278864b4e5STejun Heo /* 1428967b494eSTejun Heo * @pwq can't be released under pool->lock, bounce to a dedicated 1429967b494eSTejun Heo * kthread_worker to avoid A-A deadlocks. 14308864b4e5STejun Heo */ 1431687a9aa5STejun Heo kthread_queue_work(pwq_release_worker, &pwq->release_work); 14328864b4e5STejun Heo } 14338864b4e5STejun Heo 1434dce90d47STejun Heo /** 1435dce90d47STejun Heo * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock 1436dce90d47STejun Heo * @pwq: pool_workqueue to put (can be %NULL) 1437dce90d47STejun Heo * 1438dce90d47STejun Heo * put_pwq() with locking. This function also allows %NULL @pwq. 1439dce90d47STejun Heo */ 1440dce90d47STejun Heo static void put_pwq_unlocked(struct pool_workqueue *pwq) 1441dce90d47STejun Heo { 1442dce90d47STejun Heo if (pwq) { 1443dce90d47STejun Heo /* 144424acfb71SThomas Gleixner * As both pwqs and pools are RCU protected, the 1445dce90d47STejun Heo * following lock operations are safe. 
1446dce90d47STejun Heo */ 1447a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pwq->pool->lock); 1448dce90d47STejun Heo put_pwq(pwq); 1449a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pwq->pool->lock); 1450dce90d47STejun Heo } 1451dce90d47STejun Heo } 1452dce90d47STejun Heo 1453957578ecSGreg Kroah-Hartman static void pwq_activate_inactive_work(struct work_struct *work) 1454bf4ede01STejun Heo { 1455957578ecSGreg Kroah-Hartman struct pool_workqueue *pwq = get_work_pwq(work); 1456957578ecSGreg Kroah-Hartman 1457bf4ede01STejun Heo trace_workqueue_activate_work(work); 145882607adcSTejun Heo if (list_empty(&pwq->pool->worklist)) 145982607adcSTejun Heo pwq->pool->watchdog_ts = jiffies; 1460112202d9STejun Heo move_linked_works(work, &pwq->pool->worklist, NULL); 14615debbff9SGreg Kroah-Hartman __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); 1462112202d9STejun Heo pwq->nr_active++; 1463bf4ede01STejun Heo } 1464bf4ede01STejun Heo 14655debbff9SGreg Kroah-Hartman static void pwq_activate_first_inactive(struct pool_workqueue *pwq) 14663aa62497SLai Jiangshan { 14675debbff9SGreg Kroah-Hartman struct work_struct *work = list_first_entry(&pwq->inactive_works, 14683aa62497SLai Jiangshan struct work_struct, entry); 14693aa62497SLai Jiangshan 1470957578ecSGreg Kroah-Hartman pwq_activate_inactive_work(work); 14713aa62497SLai Jiangshan } 14723aa62497SLai Jiangshan 1473bf4ede01STejun Heo /** 1474112202d9STejun Heo * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1475112202d9STejun Heo * @pwq: pwq of interest 1476c4560c2cSLai Jiangshan * @work_data: work_data of work which left the queue 1477bf4ede01STejun Heo * 1478bf4ede01STejun Heo * A work either has completed or is removed from pending queue, 1479112202d9STejun Heo * decrement nr_in_flight of its pwq and handle workqueue flushing. 1480bf4ede01STejun Heo * 1481bf4ede01STejun Heo * CONTEXT: 1482a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock). 1483bf4ede01STejun Heo */ 1484c4560c2cSLai Jiangshan static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) 1485bf4ede01STejun Heo { 1486c4560c2cSLai Jiangshan int color = get_work_color(work_data); 1487c4560c2cSLai Jiangshan 14885debbff9SGreg Kroah-Hartman if (!(work_data & WORK_STRUCT_INACTIVE)) { 14895debbff9SGreg Kroah-Hartman pwq->nr_active--; 14905debbff9SGreg Kroah-Hartman if (!list_empty(&pwq->inactive_works)) { 14915debbff9SGreg Kroah-Hartman /* one down, submit an inactive one */ 14925debbff9SGreg Kroah-Hartman if (pwq->nr_active < READ_ONCE(pwq->wq->max_active)) 14935debbff9SGreg Kroah-Hartman pwq_activate_first_inactive(pwq); 14945debbff9SGreg Kroah-Hartman } 14955debbff9SGreg Kroah-Hartman } 1496018f3a13SLai Jiangshan 1497018f3a13SLai Jiangshan pwq->nr_in_flight[color]--; 1498bf4ede01STejun Heo 1499bf4ede01STejun Heo /* is flush in progress and are we at the flushing tip? */ 1500112202d9STejun Heo if (likely(pwq->flush_color != color)) 15018864b4e5STejun Heo goto out_put; 1502bf4ede01STejun Heo 1503bf4ede01STejun Heo /* are there still in-flight works? */ 1504112202d9STejun Heo if (pwq->nr_in_flight[color]) 15058864b4e5STejun Heo goto out_put; 1506bf4ede01STejun Heo 1507112202d9STejun Heo /* this pwq is done, clear flush_color */ 1508112202d9STejun Heo pwq->flush_color = -1; 1509bf4ede01STejun Heo 1510bf4ede01STejun Heo /* 1511112202d9STejun Heo * If this was the last pwq, wake up the first flusher. It 1512bf4ede01STejun Heo * will handle the rest. 
1513bf4ede01STejun Heo */ 1514112202d9STejun Heo if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 1515112202d9STejun Heo complete(&pwq->wq->first_flusher->done); 15168864b4e5STejun Heo out_put: 15178864b4e5STejun Heo put_pwq(pwq); 1518bf4ede01STejun Heo } 1519bf4ede01STejun Heo 152036e227d2STejun Heo /** 1521bbb68dfaSTejun Heo * try_to_grab_pending - steal work item from worklist and disable irq 152236e227d2STejun Heo * @work: work item to steal 152336e227d2STejun Heo * @is_dwork: @work is a delayed_work 1524bbb68dfaSTejun Heo * @flags: place to store irq state 152536e227d2STejun Heo * 152636e227d2STejun Heo * Try to grab PENDING bit of @work. This function can handle @work in any 1527d185af30SYacine Belkadi * stable state - idle, on timer or on worklist. 152836e227d2STejun Heo * 1529d185af30SYacine Belkadi * Return: 15303eb6b31bSMauro Carvalho Chehab * 15313eb6b31bSMauro Carvalho Chehab * ======== ================================================================ 153236e227d2STejun Heo * 1 if @work was pending and we successfully stole PENDING 153336e227d2STejun Heo * 0 if @work was idle and we claimed PENDING 153436e227d2STejun Heo * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1535bbb68dfaSTejun Heo * -ENOENT if someone else is canceling @work, this state may persist 1536bbb68dfaSTejun Heo * for arbitrarily long 15373eb6b31bSMauro Carvalho Chehab * ======== ================================================================ 153836e227d2STejun Heo * 1539d185af30SYacine Belkadi * Note: 1540bbb68dfaSTejun Heo * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1541e0aecdd8STejun Heo * interrupted while holding PENDING and @work off queue, irq must be 1542e0aecdd8STejun Heo * disabled on entry. This, combined with delayed_work->timer being 1543e0aecdd8STejun Heo * irqsafe, ensures that we return -EAGAIN for finite short period of time. 1544bbb68dfaSTejun Heo * 1545bbb68dfaSTejun Heo * On successful return, >= 0, irq is disabled and the caller is 1546bbb68dfaSTejun Heo * responsible for releasing it using local_irq_restore(*@flags). 1547bbb68dfaSTejun Heo * 1548e0aecdd8STejun Heo * This function is safe to call from any context including IRQ handler. 1549bf4ede01STejun Heo */ 1550bbb68dfaSTejun Heo static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1551bbb68dfaSTejun Heo unsigned long *flags) 1552bf4ede01STejun Heo { 1553d565ed63STejun Heo struct worker_pool *pool; 1554112202d9STejun Heo struct pool_workqueue *pwq; 1555bf4ede01STejun Heo 1556bbb68dfaSTejun Heo local_irq_save(*flags); 1557bbb68dfaSTejun Heo 155836e227d2STejun Heo /* try to steal the timer if it exists */ 155936e227d2STejun Heo if (is_dwork) { 156036e227d2STejun Heo struct delayed_work *dwork = to_delayed_work(work); 156136e227d2STejun Heo 1562e0aecdd8STejun Heo /* 1563e0aecdd8STejun Heo * dwork->timer is irqsafe. If del_timer() fails, it's 1564e0aecdd8STejun Heo * guaranteed that the timer is not queued anywhere and not 1565e0aecdd8STejun Heo * running on the local CPU. 
1566e0aecdd8STejun Heo */ 156736e227d2STejun Heo if (likely(del_timer(&dwork->timer))) 156836e227d2STejun Heo return 1; 156936e227d2STejun Heo } 157036e227d2STejun Heo 157136e227d2STejun Heo /* try to claim PENDING the normal way */ 1572bf4ede01STejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 1573bf4ede01STejun Heo return 0; 1574bf4ede01STejun Heo 157524acfb71SThomas Gleixner rcu_read_lock(); 1576bf4ede01STejun Heo /* 1577bf4ede01STejun Heo * The queueing is in progress, or it is already queued. Try to 1578bf4ede01STejun Heo * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 1579bf4ede01STejun Heo */ 1580d565ed63STejun Heo pool = get_work_pool(work); 1581d565ed63STejun Heo if (!pool) 1582bbb68dfaSTejun Heo goto fail; 1583bf4ede01STejun Heo 1584a9b8a985SSebastian Andrzej Siewior raw_spin_lock(&pool->lock); 1585bf4ede01STejun Heo /* 1586112202d9STejun Heo * work->data is guaranteed to point to pwq only while the work 1587112202d9STejun Heo * item is queued on pwq->wq, and both updating work->data to point 1588112202d9STejun Heo * to pwq on queueing and to pool on dequeueing are done under 1589112202d9STejun Heo * pwq->pool->lock. This in turn guarantees that, if work->data 1590112202d9STejun Heo * points to pwq which is associated with a locked pool, the work 15910b3dae68SLai Jiangshan * item is currently queued on that pool. 1592bf4ede01STejun Heo */ 1593112202d9STejun Heo pwq = get_work_pwq(work); 1594112202d9STejun Heo if (pwq && pwq->pool == pool) { 1595bf4ede01STejun Heo debug_work_deactivate(work); 15963aa62497SLai Jiangshan 15973aa62497SLai Jiangshan /* 1598018f3a13SLai Jiangshan * A cancelable inactive work item must be in the 1599018f3a13SLai Jiangshan * pwq->inactive_works since a queued barrier can't be 1600018f3a13SLai Jiangshan * canceled (see the comments in insert_wq_barrier()). 1601018f3a13SLai Jiangshan * 1602f97a4a1aSLai Jiangshan * An inactive work item cannot be grabbed directly because 1603d812796eSLai Jiangshan * it might have linked barrier work items which, if left 1604f97a4a1aSLai Jiangshan * on the inactive_works list, will confuse pwq->nr_active 160516062836STejun Heo * management later on and cause stall. Make sure the work 160616062836STejun Heo * item is activated before grabbing. 
16073aa62497SLai Jiangshan */ 1608957578ecSGreg Kroah-Hartman if (*work_data_bits(work) & WORK_STRUCT_INACTIVE) 1609957578ecSGreg Kroah-Hartman pwq_activate_inactive_work(work); 16103aa62497SLai Jiangshan 1611bf4ede01STejun Heo list_del_init(&work->entry); 1612c4560c2cSLai Jiangshan pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); 161336e227d2STejun Heo 1614112202d9STejun Heo /* work->data points to pwq iff queued, point to pool */ 16154468a00fSLai Jiangshan set_work_pool_and_keep_pending(work, pool->id); 16164468a00fSLai Jiangshan 1617a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&pool->lock); 161824acfb71SThomas Gleixner rcu_read_unlock(); 161936e227d2STejun Heo return 1; 1620bf4ede01STejun Heo } 1621a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&pool->lock); 1622bbb68dfaSTejun Heo fail: 162324acfb71SThomas Gleixner rcu_read_unlock(); 1624bbb68dfaSTejun Heo local_irq_restore(*flags); 1625bbb68dfaSTejun Heo if (work_is_canceling(work)) 1626bbb68dfaSTejun Heo return -ENOENT; 1627bbb68dfaSTejun Heo cpu_relax(); 162836e227d2STejun Heo return -EAGAIN; 1629bf4ede01STejun Heo } 1630bf4ede01STejun Heo 1631bf4ede01STejun Heo /** 1632706026c2STejun Heo * insert_work - insert a work into a pool 1633112202d9STejun Heo * @pwq: pwq @work belongs to 16344690c4abSTejun Heo * @work: work to insert 16354690c4abSTejun Heo * @head: insertion point 16364690c4abSTejun Heo * @extra_flags: extra WORK_STRUCT_* flags to set 16374690c4abSTejun Heo * 1638112202d9STejun Heo * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 1639706026c2STejun Heo * work_struct flags. 16404690c4abSTejun Heo * 16414690c4abSTejun Heo * CONTEXT: 1642a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock). 1643365970a1SDavid Howells */ 1644112202d9STejun Heo static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 1645112202d9STejun Heo struct list_head *head, unsigned int extra_flags) 1646b89deed3SOleg Nesterov { 1647fe089f87STejun Heo debug_work_activate(work); 1648e1d8aa9fSFrederic Weisbecker 1649e89a85d6SWalter Wu /* record the work call stack in order to print it in KASAN reports */ 1650f70da745SMarco Elver kasan_record_aux_stack_noalloc(work); 1651e89a85d6SWalter Wu 16524690c4abSTejun Heo /* we own @work, set data and link */ 1653112202d9STejun Heo set_work_pwq(work, pwq, extra_flags); 16541a4d9b0aSOleg Nesterov list_add_tail(&work->entry, head); 16558864b4e5STejun Heo get_pwq(pwq); 1656b89deed3SOleg Nesterov } 1657b89deed3SOleg Nesterov 1658c8efcc25STejun Heo /* 1659c8efcc25STejun Heo * Test whether @work is being queued from another work executing on the 16608d03ecfeSTejun Heo * same workqueue. 1661c8efcc25STejun Heo */ 1662c8efcc25STejun Heo static bool is_chained_work(struct workqueue_struct *wq) 1663c8efcc25STejun Heo { 1664c8efcc25STejun Heo struct worker *worker; 1665c8efcc25STejun Heo 16668d03ecfeSTejun Heo worker = current_wq_worker(); 1667c8efcc25STejun Heo /* 1668bf393fd4SBart Van Assche * Return %true iff I'm a worker executing a work item on @wq. If 16698d03ecfeSTejun Heo * I'm @worker, it's safe to dereference it without locking. 1670c8efcc25STejun Heo */ 1671112202d9STejun Heo return worker && worker->current_pwq->wq == wq; 1672c8efcc25STejun Heo } 1673c8efcc25STejun Heo 1674ef557180SMike Galbraith /* 1675ef557180SMike Galbraith * When queueing an unbound work item to a wq, prefer local CPU if allowed 1676ef557180SMike Galbraith * by wq_unbound_cpumask. 
Otherwise, round robin among the allowed ones to 1677ef557180SMike Galbraith * avoid perturbing sensitive tasks. 1678ef557180SMike Galbraith */ 1679ef557180SMike Galbraith static int wq_select_unbound_cpu(int cpu) 1680ef557180SMike Galbraith { 1681ef557180SMike Galbraith int new_cpu; 1682ef557180SMike Galbraith 1683f303fccbSTejun Heo if (likely(!wq_debug_force_rr_cpu)) { 1684ef557180SMike Galbraith if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) 1685ef557180SMike Galbraith return cpu; 1686a8ec5880SAmmar Faizi } else { 1687a8ec5880SAmmar Faizi pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n"); 1688f303fccbSTejun Heo } 1689f303fccbSTejun Heo 1690ef557180SMike Galbraith new_cpu = __this_cpu_read(wq_rr_cpu_last); 1691ef557180SMike Galbraith new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 1692ef557180SMike Galbraith if (unlikely(new_cpu >= nr_cpu_ids)) { 1693ef557180SMike Galbraith new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 1694ef557180SMike Galbraith if (unlikely(new_cpu >= nr_cpu_ids)) 1695ef557180SMike Galbraith return cpu; 1696ef557180SMike Galbraith } 1697ef557180SMike Galbraith __this_cpu_write(wq_rr_cpu_last, new_cpu); 1698ef557180SMike Galbraith 1699ef557180SMike Galbraith return new_cpu; 1700ef557180SMike Galbraith } 1701ef557180SMike Galbraith 1702d84ff051STejun Heo static void __queue_work(int cpu, struct workqueue_struct *wq, 17031da177e4SLinus Torvalds struct work_struct *work) 17041da177e4SLinus Torvalds { 1705112202d9STejun Heo struct pool_workqueue *pwq; 1706fe089f87STejun Heo struct worker_pool *last_pool, *pool; 17078a2e8e5dSTejun Heo unsigned int work_flags; 1708b75cac93SJoonsoo Kim unsigned int req_cpu = cpu; 17098930cabaSTejun Heo 17108930cabaSTejun Heo /* 17118930cabaSTejun Heo * While a work item is PENDING && off queue, a task trying to 17128930cabaSTejun Heo * steal the PENDING will busy-loop waiting for it to either get 17138930cabaSTejun Heo * queued or lose PENDING. Grabbing PENDING and queueing should 17148930cabaSTejun Heo * happen with IRQ disabled. 17158930cabaSTejun Heo */ 17168e8eb730SFrederic Weisbecker lockdep_assert_irqs_disabled(); 17171da177e4SLinus Torvalds 17181e19ffc6STejun Heo 171933e3f0a3SRichard Clark /* 172033e3f0a3SRichard Clark * For a draining wq, only works from the same workqueue are 172133e3f0a3SRichard Clark * allowed. The __WQ_DESTROYING helps to spot the issue that 172233e3f0a3SRichard Clark * queues a new work item to a wq after destroy_workqueue(wq). 
172333e3f0a3SRichard Clark */ 172433e3f0a3SRichard Clark if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && 172533e3f0a3SRichard Clark WARN_ON_ONCE(!is_chained_work(wq)))) 1726e41e704bSTejun Heo return; 172724acfb71SThomas Gleixner rcu_read_lock(); 17289e8cd2f5STejun Heo retry: 1729aa202f1fSHillf Danton /* pwq which will be used unless @work is executing elsewhere */ 1730636b927eSTejun Heo if (req_cpu == WORK_CPU_UNBOUND) { 1731636b927eSTejun Heo if (wq->flags & WQ_UNBOUND) 1732ef557180SMike Galbraith cpu = wq_select_unbound_cpu(raw_smp_processor_id()); 1733636b927eSTejun Heo else 1734aa202f1fSHillf Danton cpu = raw_smp_processor_id(); 1735aa202f1fSHillf Danton } 1736f3421797STejun Heo 1737636b927eSTejun Heo pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu)); 1738fe089f87STejun Heo pool = pwq->pool; 1739fe089f87STejun Heo 174018aa9effSTejun Heo /* 1741c9178087STejun Heo * If @work was previously on a different pool, it might still be 1742c9178087STejun Heo * running there, in which case the work needs to be queued on that 1743c9178087STejun Heo * pool to guarantee non-reentrancy. 174418aa9effSTejun Heo */ 1745c9e7cf27STejun Heo last_pool = get_work_pool(work); 1746fe089f87STejun Heo if (last_pool && last_pool != pool) { 174718aa9effSTejun Heo struct worker *worker; 174818aa9effSTejun Heo 1749a9b8a985SSebastian Andrzej Siewior raw_spin_lock(&last_pool->lock); 175018aa9effSTejun Heo 1751c9e7cf27STejun Heo worker = find_worker_executing_work(last_pool, work); 175218aa9effSTejun Heo 1753112202d9STejun Heo if (worker && worker->current_pwq->wq == wq) { 1754c9178087STejun Heo pwq = worker->current_pwq; 1755fe089f87STejun Heo pool = pwq->pool; 1756fe089f87STejun Heo WARN_ON_ONCE(pool != last_pool); 17578594fadeSLai Jiangshan } else { 175818aa9effSTejun Heo /* meh... not running there, queue here */ 1759a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&last_pool->lock); 1760fe089f87STejun Heo raw_spin_lock(&pool->lock); 176118aa9effSTejun Heo } 17628930cabaSTejun Heo } else { 1763fe089f87STejun Heo raw_spin_lock(&pool->lock); 17648930cabaSTejun Heo } 1765502ca9d8STejun Heo 17669e8cd2f5STejun Heo /* 1767636b927eSTejun Heo * pwq is determined and locked. For unbound pools, we could have raced 1768636b927eSTejun Heo * with pwq release and it could already be dead. If its refcnt is zero, 1769636b927eSTejun Heo * repeat pwq selection. Note that unbound pwqs never die without 1770636b927eSTejun Heo * another pwq replacing it in cpu_pwq or while work items are executing 1771636b927eSTejun Heo * on it, so the retrying is guaranteed to make forward-progress. 
17729e8cd2f5STejun Heo */ 17739e8cd2f5STejun Heo if (unlikely(!pwq->refcnt)) { 17749e8cd2f5STejun Heo if (wq->flags & WQ_UNBOUND) { 1775fe089f87STejun Heo raw_spin_unlock(&pool->lock); 17769e8cd2f5STejun Heo cpu_relax(); 17779e8cd2f5STejun Heo goto retry; 17789e8cd2f5STejun Heo } 17799e8cd2f5STejun Heo /* oops */ 17809e8cd2f5STejun Heo WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 17819e8cd2f5STejun Heo wq->name, cpu); 17829e8cd2f5STejun Heo } 17839e8cd2f5STejun Heo 1784112202d9STejun Heo /* pwq determined, queue */ 1785112202d9STejun Heo trace_workqueue_queue_work(req_cpu, pwq, work); 1786502ca9d8STejun Heo 178724acfb71SThomas Gleixner if (WARN_ON(!list_empty(&work->entry))) 178824acfb71SThomas Gleixner goto out; 17891e19ffc6STejun Heo 1790112202d9STejun Heo pwq->nr_in_flight[pwq->work_color]++; 1791112202d9STejun Heo work_flags = work_color_to_flags(pwq->work_color); 17921e19ffc6STejun Heo 179382e098f5STejun Heo /* 179482e098f5STejun Heo * Limit the number of concurrently active work items to max_active. 179582e098f5STejun Heo * @work must also queue behind existing inactive work items to maintain 179682e098f5STejun Heo * ordering when max_active changes. See wq_adjust_max_active(). 179782e098f5STejun Heo */ 17985debbff9SGreg Kroah-Hartman if (list_empty(&pwq->inactive_works) && 17995debbff9SGreg Kroah-Hartman pwq->nr_active < READ_ONCE(pwq->wq->max_active)) { 1800fe089f87STejun Heo if (list_empty(&pool->worklist)) 1801fe089f87STejun Heo pool->watchdog_ts = jiffies; 1802fe089f87STejun Heo 1803cdadf009STejun Heo trace_workqueue_activate_work(work); 18045debbff9SGreg Kroah-Hartman pwq->nr_active++; 1805fe089f87STejun Heo insert_work(pwq, work, &pool->worklist, work_flags); 18060219a352STejun Heo kick_pool(pool); 18078a2e8e5dSTejun Heo } else { 1808f97a4a1aSLai Jiangshan work_flags |= WORK_STRUCT_INACTIVE; 1809fe089f87STejun Heo insert_work(pwq, work, &pwq->inactive_works, work_flags); 18108a2e8e5dSTejun Heo } 18111e19ffc6STejun Heo 181224acfb71SThomas Gleixner out: 1813fe089f87STejun Heo raw_spin_unlock(&pool->lock); 181424acfb71SThomas Gleixner rcu_read_unlock(); 18151da177e4SLinus Torvalds } 18161da177e4SLinus Torvalds 18170fcb78c2SRolf Eike Beer /** 1818c1a220e7SZhang Rui * queue_work_on - queue work on specific cpu 1819c1a220e7SZhang Rui * @cpu: CPU number to execute work on 1820c1a220e7SZhang Rui * @wq: workqueue to use 1821c1a220e7SZhang Rui * @work: work to queue 1822c1a220e7SZhang Rui * 1823c1a220e7SZhang Rui * We queue the work to a specific CPU, the caller must ensure it 1824443378f0SPaul E. McKenney * can't go away. Callers that fail to ensure that the specified 1825443378f0SPaul E. McKenney * CPU cannot go away will execute on a randomly chosen CPU. 1826854f5cc5SPaul E. McKenney * But note well that callers specifying a CPU that never has been 1827854f5cc5SPaul E. McKenney * online will get a splat. 1828d185af30SYacine Belkadi * 1829d185af30SYacine Belkadi * Return: %false if @work was already on a queue, %true otherwise. 
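 *
 * A minimal caller-side sketch with hypothetical names, assuming the caller
 * knows CPU 1 cannot go offline:
 *
 *	static void frob_fn(struct work_struct *work)
 *	{
 *		pr_info("frob_work ran\n");
 *	}
 *	static DECLARE_WORK(frob_work, frob_fn);
 *
 *	if (!queue_work_on(1, system_wq, &frob_work))
 *		pr_debug("frob_work was already pending\n");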
1830c1a220e7SZhang Rui */ 1831d4283e93STejun Heo bool queue_work_on(int cpu, struct workqueue_struct *wq, 1832d4283e93STejun Heo struct work_struct *work) 1833c1a220e7SZhang Rui { 1834d4283e93STejun Heo bool ret = false; 18358930cabaSTejun Heo unsigned long flags; 18368930cabaSTejun Heo 18378930cabaSTejun Heo local_irq_save(flags); 1838c1a220e7SZhang Rui 183922df02bbSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 18404690c4abSTejun Heo __queue_work(cpu, wq, work); 1841d4283e93STejun Heo ret = true; 1842c1a220e7SZhang Rui } 18438930cabaSTejun Heo 18448930cabaSTejun Heo local_irq_restore(flags); 1845c1a220e7SZhang Rui return ret; 1846c1a220e7SZhang Rui } 1847ad7b1f84SMarc Dionne EXPORT_SYMBOL(queue_work_on); 1848c1a220e7SZhang Rui 18498204e0c1SAlexander Duyck /** 1850fef59c9cSTejun Heo * select_numa_node_cpu - Select a CPU based on NUMA node 18518204e0c1SAlexander Duyck * @node: NUMA node ID that we want to select a CPU from 18528204e0c1SAlexander Duyck * 18538204e0c1SAlexander Duyck * This function will attempt to find a "random" cpu available on a given 18548204e0c1SAlexander Duyck * node. If there are no CPUs available on the given node it will return 18558204e0c1SAlexander Duyck * WORK_CPU_UNBOUND indicating that we should just schedule to any 18568204e0c1SAlexander Duyck * available CPU if we need to schedule this work. 18578204e0c1SAlexander Duyck */ 1858fef59c9cSTejun Heo static int select_numa_node_cpu(int node) 18598204e0c1SAlexander Duyck { 18608204e0c1SAlexander Duyck int cpu; 18618204e0c1SAlexander Duyck 18628204e0c1SAlexander Duyck /* Delay binding to CPU if node is not valid or online */ 18638204e0c1SAlexander Duyck if (node < 0 || node >= MAX_NUMNODES || !node_online(node)) 18648204e0c1SAlexander Duyck return WORK_CPU_UNBOUND; 18658204e0c1SAlexander Duyck 18668204e0c1SAlexander Duyck /* Use local node/cpu if we are already there */ 18678204e0c1SAlexander Duyck cpu = raw_smp_processor_id(); 18688204e0c1SAlexander Duyck if (node == cpu_to_node(cpu)) 18698204e0c1SAlexander Duyck return cpu; 18708204e0c1SAlexander Duyck 18718204e0c1SAlexander Duyck /* Use "random" otherwise known as "first" online CPU of node */ 18728204e0c1SAlexander Duyck cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask); 18738204e0c1SAlexander Duyck 18748204e0c1SAlexander Duyck /* If CPU is valid return that, otherwise just defer */ 18758204e0c1SAlexander Duyck return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND; 18768204e0c1SAlexander Duyck } 18778204e0c1SAlexander Duyck 18788204e0c1SAlexander Duyck /** 18798204e0c1SAlexander Duyck * queue_work_node - queue work on a "random" cpu for a given NUMA node 18808204e0c1SAlexander Duyck * @node: NUMA node that we are targeting the work for 18818204e0c1SAlexander Duyck * @wq: workqueue to use 18828204e0c1SAlexander Duyck * @work: work to queue 18838204e0c1SAlexander Duyck * 18848204e0c1SAlexander Duyck * We queue the work to a "random" CPU within a given NUMA node. The basic 18858204e0c1SAlexander Duyck * idea here is to provide a way to somehow associate work with a given 18868204e0c1SAlexander Duyck * NUMA node. 18878204e0c1SAlexander Duyck * 18888204e0c1SAlexander Duyck * This function will only make a best effort attempt at getting this onto 18898204e0c1SAlexander Duyck * the right NUMA node. If no node is requested or the requested node is 18908204e0c1SAlexander Duyck * offline then we just fall back to standard queue_work behavior.
18918204e0c1SAlexander Duyck * 18928204e0c1SAlexander Duyck * Currently the "random" CPU ends up being the first available CPU in the 18938204e0c1SAlexander Duyck * intersection of cpu_online_mask and the cpumask of the node, unless we 18948204e0c1SAlexander Duyck * are running on the node. In that case we just use the current CPU. 18958204e0c1SAlexander Duyck * 18968204e0c1SAlexander Duyck * Return: %false if @work was already on a queue, %true otherwise. 18978204e0c1SAlexander Duyck */ 18988204e0c1SAlexander Duyck bool queue_work_node(int node, struct workqueue_struct *wq, 18998204e0c1SAlexander Duyck struct work_struct *work) 19008204e0c1SAlexander Duyck { 19018204e0c1SAlexander Duyck unsigned long flags; 19028204e0c1SAlexander Duyck bool ret = false; 19038204e0c1SAlexander Duyck 19048204e0c1SAlexander Duyck /* 19058204e0c1SAlexander Duyck * This current implementation is specific to unbound workqueues. 19068204e0c1SAlexander Duyck * Specifically we only return the first available CPU for a given 19078204e0c1SAlexander Duyck * node instead of cycling through individual CPUs within the node. 19088204e0c1SAlexander Duyck * 19098204e0c1SAlexander Duyck * If this is used with a per-cpu workqueue then the logic in 19108204e0c1SAlexander Duyck * workqueue_select_cpu_near would need to be updated to allow for 19118204e0c1SAlexander Duyck * some round robin type logic. 19128204e0c1SAlexander Duyck */ 19138204e0c1SAlexander Duyck WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); 19148204e0c1SAlexander Duyck 19158204e0c1SAlexander Duyck local_irq_save(flags); 19168204e0c1SAlexander Duyck 19178204e0c1SAlexander Duyck if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1918fef59c9cSTejun Heo int cpu = select_numa_node_cpu(node); 19198204e0c1SAlexander Duyck 19208204e0c1SAlexander Duyck __queue_work(cpu, wq, work); 19218204e0c1SAlexander Duyck ret = true; 19228204e0c1SAlexander Duyck } 19238204e0c1SAlexander Duyck 19248204e0c1SAlexander Duyck local_irq_restore(flags); 19258204e0c1SAlexander Duyck return ret; 19268204e0c1SAlexander Duyck } 19278204e0c1SAlexander Duyck EXPORT_SYMBOL_GPL(queue_work_node); 19288204e0c1SAlexander Duyck 19298c20feb6SKees Cook void delayed_work_timer_fn(struct timer_list *t) 19301da177e4SLinus Torvalds { 19318c20feb6SKees Cook struct delayed_work *dwork = from_timer(dwork, t, timer); 19321da177e4SLinus Torvalds 1933e0aecdd8STejun Heo /* should have been called from irqsafe timer with irq already off */ 193460c057bcSLai Jiangshan __queue_work(dwork->cpu, dwork->wq, &dwork->work); 19351da177e4SLinus Torvalds } 19361438ade5SKonstantin Khlebnikov EXPORT_SYMBOL(delayed_work_timer_fn); 19371da177e4SLinus Torvalds 19387beb2edfSTejun Heo static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 193952bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 19401da177e4SLinus Torvalds { 19417beb2edfSTejun Heo struct timer_list *timer = &dwork->timer; 19427beb2edfSTejun Heo struct work_struct *work = &dwork->work; 19431da177e4SLinus Torvalds 1944637fdbaeSTejun Heo WARN_ON_ONCE(!wq); 19454b243563SSami Tolvanen WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 1946fc4b514fSTejun Heo WARN_ON_ONCE(timer_pending(timer)); 1947fc4b514fSTejun Heo WARN_ON_ONCE(!list_empty(&work->entry)); 19487beb2edfSTejun Heo 19498852aac2STejun Heo /* 19508852aac2STejun Heo * If @delay is 0, queue @dwork->work immediately. This is for 19518852aac2STejun Heo * both optimization and correctness. 
The earliest @timer can 19528852aac2STejun Heo * expire is on the closest next tick and delayed_work users depend 19538852aac2STejun Heo * on that there's no such delay when @delay is 0. 19548852aac2STejun Heo */ 19558852aac2STejun Heo if (!delay) { 19568852aac2STejun Heo __queue_work(cpu, wq, &dwork->work); 19578852aac2STejun Heo return; 19588852aac2STejun Heo } 19598852aac2STejun Heo 196060c057bcSLai Jiangshan dwork->wq = wq; 19611265057fSTejun Heo dwork->cpu = cpu; 19627beb2edfSTejun Heo timer->expires = jiffies + delay; 19637beb2edfSTejun Heo 1964041bd12eSTejun Heo if (unlikely(cpu != WORK_CPU_UNBOUND)) 19657beb2edfSTejun Heo add_timer_on(timer, cpu); 1966041bd12eSTejun Heo else 1967041bd12eSTejun Heo add_timer(timer); 19687beb2edfSTejun Heo } 19691da177e4SLinus Torvalds 19700fcb78c2SRolf Eike Beer /** 19710fcb78c2SRolf Eike Beer * queue_delayed_work_on - queue work on specific CPU after delay 19720fcb78c2SRolf Eike Beer * @cpu: CPU number to execute work on 19730fcb78c2SRolf Eike Beer * @wq: workqueue to use 1974af9997e4SRandy Dunlap * @dwork: work to queue 19750fcb78c2SRolf Eike Beer * @delay: number of jiffies to wait before queueing 19760fcb78c2SRolf Eike Beer * 1977d185af30SYacine Belkadi * Return: %false if @work was already on a queue, %true otherwise. If 1978715f1300STejun Heo * @delay is zero and @dwork is idle, it will be scheduled for immediate 1979715f1300STejun Heo * execution. 19800fcb78c2SRolf Eike Beer */ 1981d4283e93STejun Heo bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 198252bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 19837a6bc1cdSVenkatesh Pallipadi { 198452bad64dSDavid Howells struct work_struct *work = &dwork->work; 1985d4283e93STejun Heo bool ret = false; 19868930cabaSTejun Heo unsigned long flags; 19878930cabaSTejun Heo 19888930cabaSTejun Heo /* read the comment in __queue_work() */ 19898930cabaSTejun Heo local_irq_save(flags); 19907a6bc1cdSVenkatesh Pallipadi 199122df02bbSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 19927beb2edfSTejun Heo __queue_delayed_work(cpu, wq, dwork, delay); 1993d4283e93STejun Heo ret = true; 19947a6bc1cdSVenkatesh Pallipadi } 19958930cabaSTejun Heo 19968930cabaSTejun Heo local_irq_restore(flags); 19977a6bc1cdSVenkatesh Pallipadi return ret; 19987a6bc1cdSVenkatesh Pallipadi } 1999ad7b1f84SMarc Dionne EXPORT_SYMBOL(queue_delayed_work_on); 20001da177e4SLinus Torvalds 2001c8e55f36STejun Heo /** 20028376fe22STejun Heo * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 20038376fe22STejun Heo * @cpu: CPU number to execute work on 20048376fe22STejun Heo * @wq: workqueue to use 20058376fe22STejun Heo * @dwork: work to queue 20068376fe22STejun Heo * @delay: number of jiffies to wait before queueing 20078376fe22STejun Heo * 20088376fe22STejun Heo * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 20098376fe22STejun Heo * modify @dwork's timer so that it expires after @delay. If @delay is 20108376fe22STejun Heo * zero, @work is guaranteed to be scheduled immediately regardless of its 20118376fe22STejun Heo * current state. 20128376fe22STejun Heo * 2013d185af30SYacine Belkadi * Return: %false if @dwork was idle and queued, %true if @dwork was 20148376fe22STejun Heo * pending and its timer was modified. 20158376fe22STejun Heo * 2016e0aecdd8STejun Heo * This function is safe to call from any context including IRQ handler. 20178376fe22STejun Heo * See try_to_grab_pending() for details. 
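 *
 * A minimal sketch of the common debouncing pattern, with a hypothetical
 * handler and WORK_CPU_UNBOUND so the workqueue picks the CPU. Each call
 * pushes the timeout back, so flush_fn() runs once, roughly 100ms after the
 * last event:
 *
 *	static void flush_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(flush_dwork, flush_fn);
 *
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &flush_dwork,
 *			    msecs_to_jiffies(100));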
20188376fe22STejun Heo */ 20198376fe22STejun Heo bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 20208376fe22STejun Heo struct delayed_work *dwork, unsigned long delay) 20218376fe22STejun Heo { 20228376fe22STejun Heo unsigned long flags; 20238376fe22STejun Heo int ret; 20248376fe22STejun Heo 20258376fe22STejun Heo do { 20268376fe22STejun Heo ret = try_to_grab_pending(&dwork->work, true, &flags); 20278376fe22STejun Heo } while (unlikely(ret == -EAGAIN)); 20288376fe22STejun Heo 20298376fe22STejun Heo if (likely(ret >= 0)) { 20308376fe22STejun Heo __queue_delayed_work(cpu, wq, dwork, delay); 20318376fe22STejun Heo local_irq_restore(flags); 20328376fe22STejun Heo } 20338376fe22STejun Heo 20348376fe22STejun Heo /* -ENOENT from try_to_grab_pending() becomes %true */ 20358376fe22STejun Heo return ret; 20368376fe22STejun Heo } 20378376fe22STejun Heo EXPORT_SYMBOL_GPL(mod_delayed_work_on); 20388376fe22STejun Heo 203905f0fe6bSTejun Heo static void rcu_work_rcufn(struct rcu_head *rcu) 204005f0fe6bSTejun Heo { 204105f0fe6bSTejun Heo struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); 204205f0fe6bSTejun Heo 204305f0fe6bSTejun Heo /* read the comment in __queue_work() */ 204405f0fe6bSTejun Heo local_irq_disable(); 204505f0fe6bSTejun Heo __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); 204605f0fe6bSTejun Heo local_irq_enable(); 204705f0fe6bSTejun Heo } 204805f0fe6bSTejun Heo 204905f0fe6bSTejun Heo /** 205005f0fe6bSTejun Heo * queue_rcu_work - queue work after a RCU grace period 205105f0fe6bSTejun Heo * @wq: workqueue to use 205205f0fe6bSTejun Heo * @rwork: work to queue 205305f0fe6bSTejun Heo * 205405f0fe6bSTejun Heo * Return: %false if @rwork was already pending, %true otherwise. Note 205505f0fe6bSTejun Heo * that a full RCU grace period is guaranteed only after a %true return. 2056bf393fd4SBart Van Assche * While @rwork is guaranteed to be executed after a %false return, the 205705f0fe6bSTejun Heo * execution may happen before a full RCU grace period has passed. 
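 *
 * A minimal sketch of the typical use, freeing a hypothetical object that
 * has just been unlinked from an RCU-protected structure:
 *
 *	struct foo {
 *		struct rcu_work rwork;
 *	};
 *
 *	static void foo_free_fn(struct work_struct *work)
 *	{
 *		kfree(container_of(to_rcu_work(work), struct foo, rwork));
 *	}
 *
 *	INIT_RCU_WORK(&foo->rwork, foo_free_fn);
 *	queue_rcu_work(system_wq, &foo->rwork);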
205805f0fe6bSTejun Heo */ 205905f0fe6bSTejun Heo bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) 206005f0fe6bSTejun Heo { 206105f0fe6bSTejun Heo struct work_struct *work = &rwork->work; 206205f0fe6bSTejun Heo 206305f0fe6bSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 206405f0fe6bSTejun Heo rwork->wq = wq; 2065a7e30c0eSUladzislau Rezki call_rcu_hurry(&rwork->rcu, rcu_work_rcufn); 206605f0fe6bSTejun Heo return true; 206705f0fe6bSTejun Heo } 206805f0fe6bSTejun Heo 206905f0fe6bSTejun Heo return false; 207005f0fe6bSTejun Heo } 207105f0fe6bSTejun Heo EXPORT_SYMBOL(queue_rcu_work); 207205f0fe6bSTejun Heo 2073f7537df5SLai Jiangshan static struct worker *alloc_worker(int node) 2074c34056a3STejun Heo { 2075c34056a3STejun Heo struct worker *worker; 2076c34056a3STejun Heo 2077f7537df5SLai Jiangshan worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); 2078c8e55f36STejun Heo if (worker) { 2079c8e55f36STejun Heo INIT_LIST_HEAD(&worker->entry); 2080affee4b2STejun Heo INIT_LIST_HEAD(&worker->scheduled); 2081da028469SLai Jiangshan INIT_LIST_HEAD(&worker->node); 2082e22bee78STejun Heo /* on creation a worker is in !idle && prep state */ 2083e22bee78STejun Heo worker->flags = WORKER_PREP; 2084c8e55f36STejun Heo } 2085c34056a3STejun Heo return worker; 2086c34056a3STejun Heo } 2087c34056a3STejun Heo 20889546b29eSTejun Heo static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) 20899546b29eSTejun Heo { 20908639ecebSTejun Heo if (pool->cpu < 0 && pool->attrs->affn_strict) 20919546b29eSTejun Heo return pool->attrs->__pod_cpumask; 20928639ecebSTejun Heo else 20938639ecebSTejun Heo return pool->attrs->cpumask; 20949546b29eSTejun Heo } 20959546b29eSTejun Heo 2096c34056a3STejun Heo /** 20974736cbf7SLai Jiangshan * worker_attach_to_pool() - attach a worker to a pool 20984736cbf7SLai Jiangshan * @worker: worker to be attached 20994736cbf7SLai Jiangshan * @pool: the target pool 21004736cbf7SLai Jiangshan * 21014736cbf7SLai Jiangshan * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and 21024736cbf7SLai Jiangshan * cpu-binding of @worker are kept coordinated with the pool across 21034736cbf7SLai Jiangshan * cpu-[un]hotplugs. 21044736cbf7SLai Jiangshan */ 21054736cbf7SLai Jiangshan static void worker_attach_to_pool(struct worker *worker, 21064736cbf7SLai Jiangshan struct worker_pool *pool) 21074736cbf7SLai Jiangshan { 21081258fae7STejun Heo mutex_lock(&wq_pool_attach_mutex); 21094736cbf7SLai Jiangshan 21104736cbf7SLai Jiangshan /* 21111258fae7STejun Heo * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains 21121258fae7STejun Heo * stable across this function. See the comments above the flag 21131258fae7STejun Heo * definition for details. 
21144736cbf7SLai Jiangshan */ 21154736cbf7SLai Jiangshan if (pool->flags & POOL_DISASSOCIATED) 21164736cbf7SLai Jiangshan worker->flags |= WORKER_UNBOUND; 21175c25b5ffSPeter Zijlstra else 21185c25b5ffSPeter Zijlstra kthread_set_per_cpu(worker->task, pool->cpu); 21194736cbf7SLai Jiangshan 2120640f17c8SPeter Zijlstra if (worker->rescue_wq) 21219546b29eSTejun Heo set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); 2122640f17c8SPeter Zijlstra 21234736cbf7SLai Jiangshan list_add_tail(&worker->node, &pool->workers); 2124a2d812a2STejun Heo worker->pool = pool; 21254736cbf7SLai Jiangshan 21261258fae7STejun Heo mutex_unlock(&wq_pool_attach_mutex); 21274736cbf7SLai Jiangshan } 21284736cbf7SLai Jiangshan 21294736cbf7SLai Jiangshan /** 213060f5a4bcSLai Jiangshan * worker_detach_from_pool() - detach a worker from its pool 213160f5a4bcSLai Jiangshan * @worker: worker which is attached to its pool 213260f5a4bcSLai Jiangshan * 21334736cbf7SLai Jiangshan * Undo the attaching which had been done in worker_attach_to_pool(). The 21344736cbf7SLai Jiangshan * caller worker shouldn't access to the pool after detached except it has 21354736cbf7SLai Jiangshan * other reference to the pool. 213660f5a4bcSLai Jiangshan */ 2137a2d812a2STejun Heo static void worker_detach_from_pool(struct worker *worker) 213860f5a4bcSLai Jiangshan { 2139a2d812a2STejun Heo struct worker_pool *pool = worker->pool; 214060f5a4bcSLai Jiangshan struct completion *detach_completion = NULL; 214160f5a4bcSLai Jiangshan 21421258fae7STejun Heo mutex_lock(&wq_pool_attach_mutex); 2143a2d812a2STejun Heo 21445c25b5ffSPeter Zijlstra kthread_set_per_cpu(worker->task, -1); 2145da028469SLai Jiangshan list_del(&worker->node); 2146a2d812a2STejun Heo worker->pool = NULL; 2147a2d812a2STejun Heo 2148e02b9312SValentin Schneider if (list_empty(&pool->workers) && list_empty(&pool->dying_workers)) 214960f5a4bcSLai Jiangshan detach_completion = pool->detach_completion; 21501258fae7STejun Heo mutex_unlock(&wq_pool_attach_mutex); 215160f5a4bcSLai Jiangshan 2152b62c0751SLai Jiangshan /* clear leftover flags without pool->lock after it is detached */ 2153b62c0751SLai Jiangshan worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); 2154b62c0751SLai Jiangshan 215560f5a4bcSLai Jiangshan if (detach_completion) 215660f5a4bcSLai Jiangshan complete(detach_completion); 215760f5a4bcSLai Jiangshan } 215860f5a4bcSLai Jiangshan 215960f5a4bcSLai Jiangshan /** 2160c34056a3STejun Heo * create_worker - create a new workqueue worker 216163d95a91STejun Heo * @pool: pool the new worker will belong to 2162c34056a3STejun Heo * 2163051e1850SLai Jiangshan * Create and start a new worker which is attached to @pool. 2164c34056a3STejun Heo * 2165c34056a3STejun Heo * CONTEXT: 2166c34056a3STejun Heo * Might sleep. Does GFP_KERNEL allocations. 2167c34056a3STejun Heo * 2168d185af30SYacine Belkadi * Return: 2169c34056a3STejun Heo * Pointer to the newly created worker. 
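 * %NULL if creating the worker fails (ID allocation, memory allocation or
 * kthread creation error).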
2170c34056a3STejun Heo */ 2171bc2ae0f5STejun Heo static struct worker *create_worker(struct worker_pool *pool) 2172c34056a3STejun Heo { 2173e441b56fSZhen Lei struct worker *worker; 2174e441b56fSZhen Lei int id; 21755d9c7a1eSLucy Mielke char id_buf[23]; 2176c34056a3STejun Heo 21777cda9aaeSLai Jiangshan /* ID is needed to determine kthread name */ 2178e441b56fSZhen Lei id = ida_alloc(&pool->worker_ida, GFP_KERNEL); 21793f0ea0b8SPetr Mladek if (id < 0) { 21803f0ea0b8SPetr Mladek pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n", 21813f0ea0b8SPetr Mladek ERR_PTR(id)); 2182e441b56fSZhen Lei return NULL; 21833f0ea0b8SPetr Mladek } 2184c34056a3STejun Heo 2185f7537df5SLai Jiangshan worker = alloc_worker(pool->node); 21863f0ea0b8SPetr Mladek if (!worker) { 21873f0ea0b8SPetr Mladek pr_err_once("workqueue: Failed to allocate a worker\n"); 2188c34056a3STejun Heo goto fail; 21893f0ea0b8SPetr Mladek } 2190c34056a3STejun Heo 2191c34056a3STejun Heo worker->id = id; 2192c34056a3STejun Heo 219329c91e99STejun Heo if (pool->cpu >= 0) 2194e3c916a4STejun Heo snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, 2195e3c916a4STejun Heo pool->attrs->nice < 0 ? "H" : ""); 2196f3421797STejun Heo else 2197e3c916a4STejun Heo snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); 2198e3c916a4STejun Heo 2199f3f90ad4STejun Heo worker->task = kthread_create_on_node(worker_thread, worker, pool->node, 2200e3c916a4STejun Heo "kworker/%s", id_buf); 22013f0ea0b8SPetr Mladek if (IS_ERR(worker->task)) { 220260f54038SPetr Mladek if (PTR_ERR(worker->task) == -EINTR) { 220360f54038SPetr Mladek pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n", 220460f54038SPetr Mladek id_buf); 220560f54038SPetr Mladek } else { 22063f0ea0b8SPetr Mladek pr_err_once("workqueue: Failed to create a worker thread: %pe", 22073f0ea0b8SPetr Mladek worker->task); 220860f54038SPetr Mladek } 2209c34056a3STejun Heo goto fail; 22103f0ea0b8SPetr Mladek } 2211c34056a3STejun Heo 221291151228SOleg Nesterov set_user_nice(worker->task, pool->attrs->nice); 22139546b29eSTejun Heo kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); 221491151228SOleg Nesterov 2215da028469SLai Jiangshan /* successful, attach the worker to the pool */ 22164736cbf7SLai Jiangshan worker_attach_to_pool(worker, pool); 2217822d8405STejun Heo 2218051e1850SLai Jiangshan /* start the newly created worker */ 2219a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 22200219a352STejun Heo 2221051e1850SLai Jiangshan worker->pool->nr_workers++; 2222051e1850SLai Jiangshan worker_enter_idle(worker); 22230219a352STejun Heo kick_pool(pool); 22240219a352STejun Heo 22250219a352STejun Heo /* 22260219a352STejun Heo * @worker is waiting on a completion in kthread() and will trigger a hung 22270219a352STejun Heo * task check if not woken up soon. As kick_pool() might not have woken it 22280219a352STejun Heo * up, wake it up explicitly once more. 
22290219a352STejun Heo */ 2230051e1850SLai Jiangshan wake_up_process(worker->task); 22310219a352STejun Heo 2232a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2233051e1850SLai Jiangshan 2234c34056a3STejun Heo return worker; 2235822d8405STejun Heo 2236c34056a3STejun Heo fail: 2237e441b56fSZhen Lei ida_free(&pool->worker_ida, id); 2238c34056a3STejun Heo kfree(worker); 2239c34056a3STejun Heo return NULL; 2240c34056a3STejun Heo } 2241c34056a3STejun Heo 2242793777bcSValentin Schneider static void unbind_worker(struct worker *worker) 2243793777bcSValentin Schneider { 2244793777bcSValentin Schneider lockdep_assert_held(&wq_pool_attach_mutex); 2245793777bcSValentin Schneider 2246793777bcSValentin Schneider kthread_set_per_cpu(worker->task, -1); 2247793777bcSValentin Schneider if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) 2248793777bcSValentin Schneider WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); 2249793777bcSValentin Schneider else 2250793777bcSValentin Schneider WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); 2251793777bcSValentin Schneider } 2252793777bcSValentin Schneider 2253e02b9312SValentin Schneider static void wake_dying_workers(struct list_head *cull_list) 2254e02b9312SValentin Schneider { 2255e02b9312SValentin Schneider struct worker *worker, *tmp; 2256e02b9312SValentin Schneider 2257e02b9312SValentin Schneider list_for_each_entry_safe(worker, tmp, cull_list, entry) { 2258e02b9312SValentin Schneider list_del_init(&worker->entry); 2259e02b9312SValentin Schneider unbind_worker(worker); 2260e02b9312SValentin Schneider /* 2261e02b9312SValentin Schneider * If the worker was somehow already running, then it had to be 2262e02b9312SValentin Schneider * in pool->idle_list when set_worker_dying() happened or we 2263e02b9312SValentin Schneider * wouldn't have gotten here. 2264c34056a3STejun Heo * 2265e02b9312SValentin Schneider * Thus, the worker must either have observed the WORKER_DIE 2266e02b9312SValentin Schneider * flag, or have set its state to TASK_IDLE. Either way, the 2267e02b9312SValentin Schneider * below will be observed by the worker and is safe to do 2268e02b9312SValentin Schneider * outside of pool->lock. 2269e02b9312SValentin Schneider */ 2270e02b9312SValentin Schneider wake_up_process(worker->task); 2271e02b9312SValentin Schneider } 2272e02b9312SValentin Schneider } 2273e02b9312SValentin Schneider 2274e02b9312SValentin Schneider /** 2275e02b9312SValentin Schneider * set_worker_dying - Tag a worker for destruction 2276e02b9312SValentin Schneider * @worker: worker to be destroyed 2277e02b9312SValentin Schneider * @list: transfer worker away from its pool->idle_list and into list 2278e02b9312SValentin Schneider * 2279e02b9312SValentin Schneider * Tag @worker for destruction and adjust @pool stats accordingly. The worker 2280e02b9312SValentin Schneider * should be idle. 2281c8e55f36STejun Heo * 2282c8e55f36STejun Heo * CONTEXT: 2283a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock). 
2284c34056a3STejun Heo */ 2285e02b9312SValentin Schneider static void set_worker_dying(struct worker *worker, struct list_head *list) 2286c34056a3STejun Heo { 2287bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 2288c34056a3STejun Heo 2289cd549687STejun Heo lockdep_assert_held(&pool->lock); 2290e02b9312SValentin Schneider lockdep_assert_held(&wq_pool_attach_mutex); 2291cd549687STejun Heo 2292c34056a3STejun Heo /* sanity check frenzy */ 22936183c009STejun Heo if (WARN_ON(worker->current_work) || 229473eb7fe7SLai Jiangshan WARN_ON(!list_empty(&worker->scheduled)) || 229573eb7fe7SLai Jiangshan WARN_ON(!(worker->flags & WORKER_IDLE))) 22966183c009STejun Heo return; 2297c34056a3STejun Heo 2298bd7bdd43STejun Heo pool->nr_workers--; 2299bd7bdd43STejun Heo pool->nr_idle--; 2300c8e55f36STejun Heo 2301cb444766STejun Heo worker->flags |= WORKER_DIE; 2302e02b9312SValentin Schneider 2303e02b9312SValentin Schneider list_move(&worker->entry, list); 2304e02b9312SValentin Schneider list_move(&worker->node, &pool->dying_workers); 2305c34056a3STejun Heo } 2306c34056a3STejun Heo 23073f959aa3SValentin Schneider /** 23083f959aa3SValentin Schneider * idle_worker_timeout - check if some idle workers can now be deleted. 23093f959aa3SValentin Schneider * @t: The pool's idle_timer that just expired 23103f959aa3SValentin Schneider * 23113f959aa3SValentin Schneider * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in 23123f959aa3SValentin Schneider * worker_leave_idle(), as a worker flicking between idle and active while its 23133f959aa3SValentin Schneider * pool is at the too_many_workers() tipping point would cause too much timer 23143f959aa3SValentin Schneider * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let 23153f959aa3SValentin Schneider * it expire and re-evaluate things from there. 
23163f959aa3SValentin Schneider */ 231732a6c723SKees Cook static void idle_worker_timeout(struct timer_list *t) 2318e22bee78STejun Heo { 231932a6c723SKees Cook struct worker_pool *pool = from_timer(pool, t, idle_timer); 23203f959aa3SValentin Schneider bool do_cull = false; 23213f959aa3SValentin Schneider 23223f959aa3SValentin Schneider if (work_pending(&pool->idle_cull_work)) 23233f959aa3SValentin Schneider return; 23243f959aa3SValentin Schneider 23253f959aa3SValentin Schneider raw_spin_lock_irq(&pool->lock); 23263f959aa3SValentin Schneider 23273f959aa3SValentin Schneider if (too_many_workers(pool)) { 23283f959aa3SValentin Schneider struct worker *worker; 23293f959aa3SValentin Schneider unsigned long expires; 23303f959aa3SValentin Schneider 23313f959aa3SValentin Schneider /* idle_list is kept in LIFO order, check the last one */ 23323f959aa3SValentin Schneider worker = list_entry(pool->idle_list.prev, struct worker, entry); 23333f959aa3SValentin Schneider expires = worker->last_active + IDLE_WORKER_TIMEOUT; 23343f959aa3SValentin Schneider do_cull = !time_before(jiffies, expires); 23353f959aa3SValentin Schneider 23363f959aa3SValentin Schneider if (!do_cull) 23373f959aa3SValentin Schneider mod_timer(&pool->idle_timer, expires); 23383f959aa3SValentin Schneider } 23393f959aa3SValentin Schneider raw_spin_unlock_irq(&pool->lock); 23403f959aa3SValentin Schneider 23413f959aa3SValentin Schneider if (do_cull) 23423f959aa3SValentin Schneider queue_work(system_unbound_wq, &pool->idle_cull_work); 23433f959aa3SValentin Schneider } 23443f959aa3SValentin Schneider 23453f959aa3SValentin Schneider /** 23463f959aa3SValentin Schneider * idle_cull_fn - cull workers that have been idle for too long. 23473f959aa3SValentin Schneider * @work: the pool's work for handling these idle workers 23483f959aa3SValentin Schneider * 23493f959aa3SValentin Schneider * This goes through a pool's idle workers and gets rid of those that have been 23503f959aa3SValentin Schneider * idle for at least IDLE_WORKER_TIMEOUT. 2351e02b9312SValentin Schneider * 2352e02b9312SValentin Schneider * We don't want to disturb isolated CPUs because of a pcpu kworker being 2353e02b9312SValentin Schneider * culled, so this also resets worker affinity. This requires a sleepable 2354e02b9312SValentin Schneider * context, hence the split between timer callback and work item. 23553f959aa3SValentin Schneider */ 23563f959aa3SValentin Schneider static void idle_cull_fn(struct work_struct *work) 23573f959aa3SValentin Schneider { 23583f959aa3SValentin Schneider struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); 23599680540cSYang Yingliang LIST_HEAD(cull_list); 2360e22bee78STejun Heo 2361e02b9312SValentin Schneider /* 2362e02b9312SValentin Schneider * Grabbing wq_pool_attach_mutex here ensures an already-running worker 2363e02b9312SValentin Schneider * cannot proceed beyond worker_detach_from_pool() in its self-destruct 2364e02b9312SValentin Schneider * path. This is required as a previously-preempted worker could run after 2365e02b9312SValentin Schneider * set_worker_dying() has happened but before wake_dying_workers() did. 
2366e02b9312SValentin Schneider */ 2367e02b9312SValentin Schneider mutex_lock(&wq_pool_attach_mutex); 2368a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2369e22bee78STejun Heo 23703347fc9fSLai Jiangshan while (too_many_workers(pool)) { 2371e22bee78STejun Heo struct worker *worker; 2372e22bee78STejun Heo unsigned long expires; 2373e22bee78STejun Heo 237463d95a91STejun Heo worker = list_entry(pool->idle_list.prev, struct worker, entry); 2375e22bee78STejun Heo expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2376e22bee78STejun Heo 23773347fc9fSLai Jiangshan if (time_before(jiffies, expires)) { 237863d95a91STejun Heo mod_timer(&pool->idle_timer, expires); 23793347fc9fSLai Jiangshan break; 2380e22bee78STejun Heo } 23813347fc9fSLai Jiangshan 2382e02b9312SValentin Schneider set_worker_dying(worker, &cull_list); 2383e22bee78STejun Heo } 2384e22bee78STejun Heo 2385a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2386e02b9312SValentin Schneider wake_dying_workers(&cull_list); 2387e02b9312SValentin Schneider mutex_unlock(&wq_pool_attach_mutex); 2388e22bee78STejun Heo } 2389e22bee78STejun Heo 2390493a1724STejun Heo static void send_mayday(struct work_struct *work) 2391e22bee78STejun Heo { 2392112202d9STejun Heo struct pool_workqueue *pwq = get_work_pwq(work); 2393112202d9STejun Heo struct workqueue_struct *wq = pwq->wq; 2394493a1724STejun Heo 23952e109a28STejun Heo lockdep_assert_held(&wq_mayday_lock); 2396e22bee78STejun Heo 2397493008a8STejun Heo if (!wq->rescuer) 2398493a1724STejun Heo return; 2399e22bee78STejun Heo 2400e22bee78STejun Heo /* mayday mayday mayday */ 2401493a1724STejun Heo if (list_empty(&pwq->mayday_node)) { 240277668c8bSLai Jiangshan /* 240377668c8bSLai Jiangshan * If @pwq is for an unbound wq, its base ref may be put at 240477668c8bSLai Jiangshan * any time due to an attribute change. Pin @pwq until the 240577668c8bSLai Jiangshan * rescuer is done with it. 240677668c8bSLai Jiangshan */ 240777668c8bSLai Jiangshan get_pwq(pwq); 2408493a1724STejun Heo list_add_tail(&pwq->mayday_node, &wq->maydays); 2409e22bee78STejun Heo wake_up_process(wq->rescuer->task); 2410725e8ec5STejun Heo pwq->stats[PWQ_STAT_MAYDAY]++; 2411493a1724STejun Heo } 2412e22bee78STejun Heo } 2413e22bee78STejun Heo 241432a6c723SKees Cook static void pool_mayday_timeout(struct timer_list *t) 2415e22bee78STejun Heo { 241632a6c723SKees Cook struct worker_pool *pool = from_timer(pool, t, mayday_timer); 2417e22bee78STejun Heo struct work_struct *work; 2418e22bee78STejun Heo 2419a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2420a9b8a985SSebastian Andrzej Siewior raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */ 2421e22bee78STejun Heo 242263d95a91STejun Heo if (need_to_create_worker(pool)) { 2423e22bee78STejun Heo /* 2424e22bee78STejun Heo * We've been trying to create a new worker but 2425e22bee78STejun Heo * haven't been successful. We might be hitting an 2426e22bee78STejun Heo * allocation deadlock. Send distress signals to 2427e22bee78STejun Heo * rescuers. 
2428e22bee78STejun Heo */ 242963d95a91STejun Heo list_for_each_entry(work, &pool->worklist, entry) 2430e22bee78STejun Heo send_mayday(work); 2431e22bee78STejun Heo } 2432e22bee78STejun Heo 2433a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&wq_mayday_lock); 2434a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2435e22bee78STejun Heo 243663d95a91STejun Heo mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 2437e22bee78STejun Heo } 2438e22bee78STejun Heo 2439e22bee78STejun Heo /** 2440e22bee78STejun Heo * maybe_create_worker - create a new worker if necessary 244163d95a91STejun Heo * @pool: pool to create a new worker for 2442e22bee78STejun Heo * 244363d95a91STejun Heo * Create a new worker for @pool if necessary. @pool is guaranteed to 2444e22bee78STejun Heo * have at least one idle worker on return from this function. If 2445e22bee78STejun Heo * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 244663d95a91STejun Heo * sent to all rescuers with works scheduled on @pool to resolve 2447e22bee78STejun Heo * possible allocation deadlock. 2448e22bee78STejun Heo * 2449c5aa87bbSTejun Heo * On return, need_to_create_worker() is guaranteed to be %false and 2450c5aa87bbSTejun Heo * may_start_working() %true. 2451e22bee78STejun Heo * 2452e22bee78STejun Heo * LOCKING: 2453a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2454e22bee78STejun Heo * multiple times. Does GFP_KERNEL allocations. Called only from 2455e22bee78STejun Heo * manager. 2456e22bee78STejun Heo */ 245729187a9eSTejun Heo static void maybe_create_worker(struct worker_pool *pool) 2458d565ed63STejun Heo __releases(&pool->lock) 2459d565ed63STejun Heo __acquires(&pool->lock) 2460e22bee78STejun Heo { 2461e22bee78STejun Heo restart: 2462a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 24639f9c2364STejun Heo 2464e22bee78STejun Heo /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 246563d95a91STejun Heo mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 2466e22bee78STejun Heo 2467e22bee78STejun Heo while (true) { 2468051e1850SLai Jiangshan if (create_worker(pool) || !need_to_create_worker(pool)) 2469e22bee78STejun Heo break; 2470e22bee78STejun Heo 2471e212f361SLai Jiangshan schedule_timeout_interruptible(CREATE_COOLDOWN); 24729f9c2364STejun Heo 247363d95a91STejun Heo if (!need_to_create_worker(pool)) 2474e22bee78STejun Heo break; 2475e22bee78STejun Heo } 2476e22bee78STejun Heo 247763d95a91STejun Heo del_timer_sync(&pool->mayday_timer); 2478a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2479051e1850SLai Jiangshan /* 2480051e1850SLai Jiangshan * This is necessary even after a new worker was just successfully 2481051e1850SLai Jiangshan * created as @pool->lock was dropped and the new worker might have 2482051e1850SLai Jiangshan * already become busy. 2483051e1850SLai Jiangshan */ 248463d95a91STejun Heo if (need_to_create_worker(pool)) 2485e22bee78STejun Heo goto restart; 2486e22bee78STejun Heo } 2487e22bee78STejun Heo 2488e22bee78STejun Heo /** 2489e22bee78STejun Heo * manage_workers - manage worker pool 2490e22bee78STejun Heo * @worker: self 2491e22bee78STejun Heo * 2492706026c2STejun Heo * Assume the manager role and manage the worker pool @worker belongs 2493e22bee78STejun Heo * to. At any given time, there can be only zero or one manager per 2494706026c2STejun Heo * pool. The exclusion is handled automatically by this function. 
2495e22bee78STejun Heo * 2496e22bee78STejun Heo * The caller can safely start processing works on false return. On 2497e22bee78STejun Heo * true return, it's guaranteed that need_to_create_worker() is false 2498e22bee78STejun Heo * and may_start_working() is true. 2499e22bee78STejun Heo * 2500e22bee78STejun Heo * CONTEXT: 2501a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2502e22bee78STejun Heo * multiple times. Does GFP_KERNEL allocations. 2503e22bee78STejun Heo * 2504d185af30SYacine Belkadi * Return: 250529187a9eSTejun Heo * %false if the pool doesn't need management and the caller can safely 250629187a9eSTejun Heo * start processing works, %true if the management function was performed and 250729187a9eSTejun Heo * the conditions that the caller verified before calling the function may 250829187a9eSTejun Heo * no longer be true. 2509e22bee78STejun Heo */ 2510e22bee78STejun Heo static bool manage_workers(struct worker *worker) 2511e22bee78STejun Heo { 251263d95a91STejun Heo struct worker_pool *pool = worker->pool; 2513e22bee78STejun Heo 2514692b4825STejun Heo if (pool->flags & POOL_MANAGER_ACTIVE) 251529187a9eSTejun Heo return false; 2516692b4825STejun Heo 2517692b4825STejun Heo pool->flags |= POOL_MANAGER_ACTIVE; 25182607d7a6STejun Heo pool->manager = worker; 2519e22bee78STejun Heo 252029187a9eSTejun Heo maybe_create_worker(pool); 2521e22bee78STejun Heo 25222607d7a6STejun Heo pool->manager = NULL; 2523692b4825STejun Heo pool->flags &= ~POOL_MANAGER_ACTIVE; 2524d8bb65abSSebastian Andrzej Siewior rcuwait_wake_up(&manager_wait); 252529187a9eSTejun Heo return true; 2526e22bee78STejun Heo } 2527e22bee78STejun Heo 2528a62428c0STejun Heo /** 2529a62428c0STejun Heo * process_one_work - process single work 2530c34056a3STejun Heo * @worker: self 2531a62428c0STejun Heo * @work: work to process 2532a62428c0STejun Heo * 2533a62428c0STejun Heo * Process @work. This function contains all the logic necessary to 2534a62428c0STejun Heo * process a single work including synchronization against and 2535a62428c0STejun Heo * interaction with other workers on the same cpu, queueing and 2536a62428c0STejun Heo * flushing. As long as the context requirement is met, any worker can 2537a62428c0STejun Heo * call this function to process a work. 2538a62428c0STejun Heo * 2539a62428c0STejun Heo * CONTEXT: 2540a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock) which is released and regrabbed. 2541a62428c0STejun Heo */ 2542c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work) 2543d565ed63STejun Heo __releases(&pool->lock) 2544d565ed63STejun Heo __acquires(&pool->lock) 25451da177e4SLinus Torvalds { 2546112202d9STejun Heo struct pool_workqueue *pwq = get_work_pwq(work); 2547bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 2548c4560c2cSLai Jiangshan unsigned long work_data; 25494e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP 25504e6045f1SJohannes Berg /* 2551a62428c0STejun Heo * It is permissible to free the struct work_struct from 2552a62428c0STejun Heo * inside the function that is called from it; this we need to 2553a62428c0STejun Heo * take into account for lockdep too. To avoid bogus "held 2554a62428c0STejun Heo * lock freed" warnings as well as problems when looking into 2555a62428c0STejun Heo * work->lockdep_map, make a copy and use that here. 
25564e6045f1SJohannes Berg */ 25574d82a1deSPeter Zijlstra struct lockdep_map lockdep_map; 25584d82a1deSPeter Zijlstra 25594d82a1deSPeter Zijlstra lockdep_copy_map(&lockdep_map, &work->lockdep_map); 25604e6045f1SJohannes Berg #endif 2561807407c0SLai Jiangshan /* ensure we're on the correct CPU */ 256285327af6SLai Jiangshan WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 2563ec22ca5eSTejun Heo raw_smp_processor_id() != pool->cpu); 256425511a47STejun Heo 25658930cabaSTejun Heo /* claim and dequeue */ 2566dc186ad7SThomas Gleixner debug_work_deactivate(work); 2567c9e7cf27STejun Heo hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 2568c34056a3STejun Heo worker->current_work = work; 2569a2c1c57bSTejun Heo worker->current_func = work->func; 2570112202d9STejun Heo worker->current_pwq = pwq; 2571616db877STejun Heo worker->current_at = worker->task->se.sum_exec_runtime; 2572c4560c2cSLai Jiangshan work_data = *work_data_bits(work); 2573d812796eSLai Jiangshan worker->current_color = get_work_color(work_data); 25747a22ad75STejun Heo 25758bf89593STejun Heo /* 25768bf89593STejun Heo * Record wq name for cmdline and debug reporting, may get 25778bf89593STejun Heo * overridden through set_worker_desc(). 25788bf89593STejun Heo */ 25798bf89593STejun Heo strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); 25808bf89593STejun Heo 2581a62428c0STejun Heo list_del_init(&work->entry); 2582a62428c0STejun Heo 2583649027d7STejun Heo /* 2584228f1d00SLai Jiangshan * CPU intensive works don't participate in concurrency management. 2585228f1d00SLai Jiangshan * They're the scheduler's responsibility. This takes @worker out 2586228f1d00SLai Jiangshan * of concurrency management and the next code block will chain 2587228f1d00SLai Jiangshan * execution of the pending work items. 2588fb0e7bebSTejun Heo */ 2589616db877STejun Heo if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE)) 2590228f1d00SLai Jiangshan worker_set_flags(worker, WORKER_CPU_INTENSIVE); 2591fb0e7bebSTejun Heo 2592974271c4STejun Heo /* 25930219a352STejun Heo * Kick @pool if necessary. It's always noop for per-cpu worker pools 25940219a352STejun Heo * since nr_running would always be >= 1 at this point. This is used to 25950219a352STejun Heo * chain execution of the pending work items for WORKER_NOT_RUNNING 25960219a352STejun Heo * workers such as the UNBOUND and CPU_INTENSIVE ones. 2597974271c4STejun Heo */ 25980219a352STejun Heo kick_pool(pool); 2599974271c4STejun Heo 26008930cabaSTejun Heo /* 26017c3eed5cSTejun Heo * Record the last pool and clear PENDING which should be the last 2602d565ed63STejun Heo * update to @work. Also, do this inside @pool->lock so that 260323657bb1STejun Heo * PENDING and queued state changes happen together while IRQ is 260423657bb1STejun Heo * disabled. 26058930cabaSTejun Heo */ 26067c3eed5cSTejun Heo set_work_pool_and_clear_pending(work, pool->id); 26071da177e4SLinus Torvalds 2608fe48ba7dSMirsad Goran Todorovac pwq->stats[PWQ_STAT_STARTED]++; 2609a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2610365970a1SDavid Howells 2611a1d14934SPeter Zijlstra lock_map_acquire(&pwq->wq->lockdep_map); 26123295f0efSIngo Molnar lock_map_acquire(&lockdep_map); 2613e6f3faa7SPeter Zijlstra /* 2614f52be570SPeter Zijlstra * Strictly speaking we should mark the invariant state without holding 2615f52be570SPeter Zijlstra * any locks, that is, before these two lock_map_acquire()'s. 
2616e6f3faa7SPeter Zijlstra * 2617e6f3faa7SPeter Zijlstra * However, that would result in: 2618e6f3faa7SPeter Zijlstra * 2619e6f3faa7SPeter Zijlstra * A(W1) 2620e6f3faa7SPeter Zijlstra * WFC(C) 2621e6f3faa7SPeter Zijlstra * A(W1) 2622e6f3faa7SPeter Zijlstra * C(C) 2623e6f3faa7SPeter Zijlstra * 2624e6f3faa7SPeter Zijlstra * Which would create W1->C->W1 dependencies, even though there is no 2625e6f3faa7SPeter Zijlstra * actual deadlock possible. There are two solutions, using a 2626e6f3faa7SPeter Zijlstra * read-recursive acquire on the work(queue) 'locks', but this will then 2627f52be570SPeter Zijlstra * hit the lockdep limitation on recursive locks, or simply discard 2628e6f3faa7SPeter Zijlstra * these locks. 2629e6f3faa7SPeter Zijlstra * 2630e6f3faa7SPeter Zijlstra * AFAICT there is no possible deadlock scenario between the 2631e6f3faa7SPeter Zijlstra * flush_work() and complete() primitives (except for single-threaded 2632e6f3faa7SPeter Zijlstra * workqueues), so hiding them isn't a problem. 2633e6f3faa7SPeter Zijlstra */ 2634f52be570SPeter Zijlstra lockdep_invariant_state(true); 2635e36c886aSArjan van de Ven trace_workqueue_execute_start(work); 2636a2c1c57bSTejun Heo worker->current_func(work); 2637e36c886aSArjan van de Ven /* 2638e36c886aSArjan van de Ven * While we must be careful to not use "work" after this, the trace 2639e36c886aSArjan van de Ven * point will only record its address. 2640e36c886aSArjan van de Ven */ 26411c5da0ecSDaniel Jordan trace_workqueue_execute_end(work, worker->current_func); 2642725e8ec5STejun Heo pwq->stats[PWQ_STAT_COMPLETED]++; 26433295f0efSIngo Molnar lock_map_release(&lockdep_map); 2644112202d9STejun Heo lock_map_release(&pwq->wq->lockdep_map); 26451da177e4SLinus Torvalds 2646d5abe669SPeter Zijlstra if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2647044c782cSValentin Ilie pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2648d75f773cSSakari Ailus " last function: %ps\n", 2649a2c1c57bSTejun Heo current->comm, preempt_count(), task_pid_nr(current), 2650a2c1c57bSTejun Heo worker->current_func); 2651d5abe669SPeter Zijlstra debug_show_held_locks(current); 2652d5abe669SPeter Zijlstra dump_stack(); 2653d5abe669SPeter Zijlstra } 2654d5abe669SPeter Zijlstra 2655b22ce278STejun Heo /* 2656025f50f3SSebastian Andrzej Siewior * The following prevents a kworker from hogging CPU on !PREEMPTION 2657b22ce278STejun Heo * kernels, where a requeueing work item waiting for something to 2658b22ce278STejun Heo * happen could deadlock with stop_machine as such work item could 2659b22ce278STejun Heo * indefinitely requeue itself while all other CPUs are trapped in 2660789cbbecSJoe Lawrence * stop_machine. At the same time, report a quiescent RCU state so 2661789cbbecSJoe Lawrence * the same condition doesn't freeze RCU. 2662b22ce278STejun Heo */ 2663a7e6425eSPaul E. McKenney cond_resched(); 2664b22ce278STejun Heo 2665a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2666a62428c0STejun Heo 2667616db877STejun Heo /* 2668616db877STejun Heo * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked 2669616db877STejun Heo * CPU intensive by wq_worker_tick() if @work hogged CPU longer than 2670616db877STejun Heo * wq_cpu_intensive_thresh_us. Clear it. 
2671616db877STejun Heo */ 2672fb0e7bebSTejun Heo worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2673fb0e7bebSTejun Heo 26741b69ac6bSJohannes Weiner /* tag the worker for identification in schedule() */ 26751b69ac6bSJohannes Weiner worker->last_func = worker->current_func; 26761b69ac6bSJohannes Weiner 2677a62428c0STejun Heo /* we're done with it, release */ 267842f8570fSSasha Levin hash_del(&worker->hentry); 2679c34056a3STejun Heo worker->current_work = NULL; 2680a2c1c57bSTejun Heo worker->current_func = NULL; 2681112202d9STejun Heo worker->current_pwq = NULL; 2682d812796eSLai Jiangshan worker->current_color = INT_MAX; 2683c4560c2cSLai Jiangshan pwq_dec_nr_in_flight(pwq, work_data); 26841da177e4SLinus Torvalds } 26851da177e4SLinus Torvalds 2686affee4b2STejun Heo /** 2687affee4b2STejun Heo * process_scheduled_works - process scheduled works 2688affee4b2STejun Heo * @worker: self 2689affee4b2STejun Heo * 2690affee4b2STejun Heo * Process all scheduled works. Please note that the scheduled list 2691affee4b2STejun Heo * may change while processing a work, so this function repeatedly 2692affee4b2STejun Heo * fetches a work from the top and executes it. 2693affee4b2STejun Heo * 2694affee4b2STejun Heo * CONTEXT: 2695a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2696affee4b2STejun Heo * multiple times. 2697affee4b2STejun Heo */ 2698affee4b2STejun Heo static void process_scheduled_works(struct worker *worker) 26991da177e4SLinus Torvalds { 2700c0ab017dSTejun Heo struct work_struct *work; 2701c0ab017dSTejun Heo bool first = true; 2702c0ab017dSTejun Heo 2703c0ab017dSTejun Heo while ((work = list_first_entry_or_null(&worker->scheduled, 2704c0ab017dSTejun Heo struct work_struct, entry))) { 2705c0ab017dSTejun Heo if (first) { 2706c0ab017dSTejun Heo worker->pool->watchdog_ts = jiffies; 2707c0ab017dSTejun Heo first = false; 2708c0ab017dSTejun Heo } 2709c34056a3STejun Heo process_one_work(worker, work); 2710a62428c0STejun Heo } 27111da177e4SLinus Torvalds } 27121da177e4SLinus Torvalds 2713197f6accSTejun Heo static void set_pf_worker(bool val) 2714197f6accSTejun Heo { 2715197f6accSTejun Heo mutex_lock(&wq_pool_attach_mutex); 2716197f6accSTejun Heo if (val) 2717197f6accSTejun Heo current->flags |= PF_WQ_WORKER; 2718197f6accSTejun Heo else 2719197f6accSTejun Heo current->flags &= ~PF_WQ_WORKER; 2720197f6accSTejun Heo mutex_unlock(&wq_pool_attach_mutex); 2721197f6accSTejun Heo } 2722197f6accSTejun Heo 27234690c4abSTejun Heo /** 27244690c4abSTejun Heo * worker_thread - the worker thread function 2725c34056a3STejun Heo * @__worker: self 27264690c4abSTejun Heo * 2727c5aa87bbSTejun Heo * The worker thread function. All workers belong to a worker_pool - 2728c5aa87bbSTejun Heo * either a per-cpu one or dynamic unbound one. These workers process all 2729c5aa87bbSTejun Heo * work items regardless of their specific target workqueue. The only 2730c5aa87bbSTejun Heo * exception is work items which belong to workqueues with a rescuer which 2731c5aa87bbSTejun Heo * will be explained in rescuer_thread(). 
2732d185af30SYacine Belkadi * 2733d185af30SYacine Belkadi * Return: 0 27344690c4abSTejun Heo */ 2735c34056a3STejun Heo static int worker_thread(void *__worker) 27361da177e4SLinus Torvalds { 2737c34056a3STejun Heo struct worker *worker = __worker; 2738bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 27391da177e4SLinus Torvalds 2740e22bee78STejun Heo /* tell the scheduler that this is a workqueue worker */ 2741197f6accSTejun Heo set_pf_worker(true); 2742c8e55f36STejun Heo woke_up: 2743a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2744affee4b2STejun Heo 2745a9ab775bSTejun Heo /* am I supposed to die? */ 2746a9ab775bSTejun Heo if (unlikely(worker->flags & WORKER_DIE)) { 2747a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 2748197f6accSTejun Heo set_pf_worker(false); 274960f5a4bcSLai Jiangshan 275060f5a4bcSLai Jiangshan set_task_comm(worker->task, "kworker/dying"); 2751e441b56fSZhen Lei ida_free(&pool->worker_ida, worker->id); 2752a2d812a2STejun Heo worker_detach_from_pool(worker); 2753e02b9312SValentin Schneider WARN_ON_ONCE(!list_empty(&worker->entry)); 275460f5a4bcSLai Jiangshan kfree(worker); 2755c8e55f36STejun Heo return 0; 2756c8e55f36STejun Heo } 2757c8e55f36STejun Heo 2758c8e55f36STejun Heo worker_leave_idle(worker); 2759db7bccf4STejun Heo recheck: 2760e22bee78STejun Heo /* no more worker necessary? */ 276163d95a91STejun Heo if (!need_more_worker(pool)) 2762e22bee78STejun Heo goto sleep; 2763e22bee78STejun Heo 2764e22bee78STejun Heo /* do we need to manage? */ 276563d95a91STejun Heo if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2766e22bee78STejun Heo goto recheck; 2767e22bee78STejun Heo 2768c8e55f36STejun Heo /* 2769c8e55f36STejun Heo * ->scheduled list can only be filled while a worker is 2770c8e55f36STejun Heo * preparing to process a work or actually processing it. 2771c8e55f36STejun Heo * Make sure nobody diddled with it while I was sleeping. 2772c8e55f36STejun Heo */ 27736183c009STejun Heo WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2774c8e55f36STejun Heo 2775e22bee78STejun Heo /* 2776a9ab775bSTejun Heo * Finish PREP stage. We're guaranteed to have at least one idle 2777a9ab775bSTejun Heo * worker or that someone else has already assumed the manager 2778a9ab775bSTejun Heo * role. This is where @worker starts participating in concurrency 2779a9ab775bSTejun Heo * management if applicable and concurrency management is restored 2780a9ab775bSTejun Heo * after being rebound. See rebind_workers() for details. 2781e22bee78STejun Heo */ 2782a9ab775bSTejun Heo worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 2783e22bee78STejun Heo 2784e22bee78STejun Heo do { 2785affee4b2STejun Heo struct work_struct *work = 2786bd7bdd43STejun Heo list_first_entry(&pool->worklist, 2787affee4b2STejun Heo struct work_struct, entry); 2788affee4b2STejun Heo 2789873eaca6STejun Heo if (assign_work(work, worker, NULL)) 2790affee4b2STejun Heo process_scheduled_works(worker); 279163d95a91STejun Heo } while (keep_working(pool)); 2792affee4b2STejun Heo 2793228f1d00SLai Jiangshan worker_set_flags(worker, WORKER_PREP); 2794d313dd85STejun Heo sleep: 2795c8e55f36STejun Heo /* 2796d565ed63STejun Heo * pool->lock is held and there's no work to process and no need to 2797d565ed63STejun Heo * manage, sleep. Workers are woken up only while holding 2798d565ed63STejun Heo * pool->lock or from local cpu, so setting the current state 2799d565ed63STejun Heo * before releasing pool->lock is enough to prevent losing any 2800d565ed63STejun Heo * event. 
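 *
 * Illustrative case analysis (editor's sketch, not from the original
 * source): wakers queue work and call wake_up_process() only while
 * holding pool->lock or from this CPU. So a waker either ran before the
 * checks above, in which case the worklist was seen as non-empty and
 * this point is never reached, or it runs after the unlock below, in
 * which case it finds the task already in TASK_IDLE and the wakeup
 * switches it back to TASK_RUNNING, making the following schedule()
 * return promptly. Either way, no wakeup is lost.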
2801c8e55f36STejun Heo */ 2802c8e55f36STejun Heo worker_enter_idle(worker); 2803c5a94a61SPeter Zijlstra __set_current_state(TASK_IDLE); 2804a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 28051da177e4SLinus Torvalds schedule(); 2806c8e55f36STejun Heo goto woke_up; 28071da177e4SLinus Torvalds } 28081da177e4SLinus Torvalds 2809e22bee78STejun Heo /** 2810e22bee78STejun Heo * rescuer_thread - the rescuer thread function 2811111c225aSTejun Heo * @__rescuer: self 2812e22bee78STejun Heo * 2813e22bee78STejun Heo * Workqueue rescuer thread function. There's one rescuer for each 2814493008a8STejun Heo * workqueue which has WQ_MEM_RECLAIM set. 2815e22bee78STejun Heo * 2816706026c2STejun Heo * Regular work processing on a pool may block trying to create a new 2817e22bee78STejun Heo * worker which uses GFP_KERNEL allocation which has slight chance of 2818e22bee78STejun Heo * developing into deadlock if some works currently on the same queue 2819e22bee78STejun Heo * need to be processed to satisfy the GFP_KERNEL allocation. This is 2820e22bee78STejun Heo * the problem rescuer solves. 2821e22bee78STejun Heo * 2822706026c2STejun Heo * When such condition is possible, the pool summons rescuers of all 2823706026c2STejun Heo * workqueues which have works queued on the pool and let them process 2824e22bee78STejun Heo * those works so that forward progress can be guaranteed. 2825e22bee78STejun Heo * 2826e22bee78STejun Heo * This should happen rarely. 2827d185af30SYacine Belkadi * 2828d185af30SYacine Belkadi * Return: 0 2829e22bee78STejun Heo */ 2830111c225aSTejun Heo static int rescuer_thread(void *__rescuer) 2831e22bee78STejun Heo { 2832111c225aSTejun Heo struct worker *rescuer = __rescuer; 2833111c225aSTejun Heo struct workqueue_struct *wq = rescuer->rescue_wq; 28344d595b86SLai Jiangshan bool should_stop; 2835e22bee78STejun Heo 2836e22bee78STejun Heo set_user_nice(current, RESCUER_NICE_LEVEL); 2837111c225aSTejun Heo 2838111c225aSTejun Heo /* 2839111c225aSTejun Heo * Mark rescuer as worker too. As WORKER_PREP is never cleared, it 2840111c225aSTejun Heo * doesn't participate in concurrency management. 2841111c225aSTejun Heo */ 2842197f6accSTejun Heo set_pf_worker(true); 2843e22bee78STejun Heo repeat: 2844c5a94a61SPeter Zijlstra set_current_state(TASK_IDLE); 28451da177e4SLinus Torvalds 28464d595b86SLai Jiangshan /* 28474d595b86SLai Jiangshan * By the time the rescuer is requested to stop, the workqueue 28484d595b86SLai Jiangshan * shouldn't have any work pending, but @wq->maydays may still have 28494d595b86SLai Jiangshan * pwq(s) queued. This can happen by non-rescuer workers consuming 28504d595b86SLai Jiangshan * all the work items before the rescuer got to them. Go through 28514d595b86SLai Jiangshan * @wq->maydays processing before acting on should_stop so that the 28524d595b86SLai Jiangshan * list is always empty on exit. 
28534d595b86SLai Jiangshan */ 28544d595b86SLai Jiangshan should_stop = kthread_should_stop(); 28551da177e4SLinus Torvalds 2856493a1724STejun Heo /* see whether any pwq is asking for help */ 2857a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&wq_mayday_lock); 2858493a1724STejun Heo 2859493a1724STejun Heo while (!list_empty(&wq->maydays)) { 2860493a1724STejun Heo struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2861493a1724STejun Heo struct pool_workqueue, mayday_node); 2862112202d9STejun Heo struct worker_pool *pool = pwq->pool; 2863e22bee78STejun Heo struct work_struct *work, *n; 2864e22bee78STejun Heo 2865e22bee78STejun Heo __set_current_state(TASK_RUNNING); 2866493a1724STejun Heo list_del_init(&pwq->mayday_node); 2867493a1724STejun Heo 2868a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&wq_mayday_lock); 2869e22bee78STejun Heo 287051697d39SLai Jiangshan worker_attach_to_pool(rescuer, pool); 287151697d39SLai Jiangshan 2872a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 2873e22bee78STejun Heo 2874e22bee78STejun Heo /* 2875e22bee78STejun Heo * Slurp in all works issued via this workqueue and 2876e22bee78STejun Heo * process'em. 2877e22bee78STejun Heo */ 2878873eaca6STejun Heo WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 287982607adcSTejun Heo list_for_each_entry_safe(work, n, &pool->worklist, entry) { 2880873eaca6STejun Heo if (get_work_pwq(work) == pwq && 2881873eaca6STejun Heo assign_work(work, rescuer, &n)) 2882725e8ec5STejun Heo pwq->stats[PWQ_STAT_RESCUED]++; 288382607adcSTejun Heo } 2884e22bee78STejun Heo 2885873eaca6STejun Heo if (!list_empty(&rescuer->scheduled)) { 2886e22bee78STejun Heo process_scheduled_works(rescuer); 28877576958aSTejun Heo 28887576958aSTejun Heo /* 2889008847f6SNeilBrown * The above execution of rescued work items could 2890008847f6SNeilBrown * have created more to rescue through 2891f97a4a1aSLai Jiangshan * pwq_activate_first_inactive() or chained 2892008847f6SNeilBrown * queueing. Let's put @pwq back on the mayday list so 2893008847f6SNeilBrown * that such back-to-back work items, which may be 2894008847f6SNeilBrown * being used to relieve memory pressure, don't 2895008847f6SNeilBrown * incur MAYDAY_INTERVAL delay in between. 2896008847f6SNeilBrown */ 28974f3f4cf3SLai Jiangshan if (pwq->nr_active && need_to_create_worker(pool)) { 2898a9b8a985SSebastian Andrzej Siewior raw_spin_lock(&wq_mayday_lock); 2899e66b39afSTejun Heo /* 2900e66b39afSTejun Heo * Queue iff we aren't racing destruction 2901e66b39afSTejun Heo * and somebody else hasn't queued it already. 2902e66b39afSTejun Heo */ 2903e66b39afSTejun Heo if (wq->rescuer && list_empty(&pwq->mayday_node)) { 2904008847f6SNeilBrown get_pwq(pwq); 2905e66b39afSTejun Heo list_add_tail(&pwq->mayday_node, &wq->maydays); 2906e66b39afSTejun Heo } 2907a9b8a985SSebastian Andrzej Siewior raw_spin_unlock(&wq_mayday_lock); 2908008847f6SNeilBrown } 2909008847f6SNeilBrown } 2910008847f6SNeilBrown 2911008847f6SNeilBrown /* 291277668c8bSLai Jiangshan * Put the reference grabbed by send_mayday(). @pool won't 291313b1d625SLai Jiangshan * go away while we're still attached to it. 291477668c8bSLai Jiangshan */ 291577668c8bSLai Jiangshan put_pwq(pwq); 291677668c8bSLai Jiangshan 291777668c8bSLai Jiangshan /* 29180219a352STejun Heo * Leave this pool. Notify regular workers; otherwise, we end up 29190219a352STejun Heo * with 0 concurrency and stalling the execution. 
29207576958aSTejun Heo */ 29210219a352STejun Heo kick_pool(pool); 29227576958aSTejun Heo 2923a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 292413b1d625SLai Jiangshan 2925a2d812a2STejun Heo worker_detach_from_pool(rescuer); 292613b1d625SLai Jiangshan 2927a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&wq_mayday_lock); 29281da177e4SLinus Torvalds } 29291da177e4SLinus Torvalds 2930a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&wq_mayday_lock); 2931493a1724STejun Heo 29324d595b86SLai Jiangshan if (should_stop) { 29334d595b86SLai Jiangshan __set_current_state(TASK_RUNNING); 2934197f6accSTejun Heo set_pf_worker(false); 29354d595b86SLai Jiangshan return 0; 29364d595b86SLai Jiangshan } 29374d595b86SLai Jiangshan 2938111c225aSTejun Heo /* rescuers should never participate in concurrency management */ 2939111c225aSTejun Heo WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2940e22bee78STejun Heo schedule(); 2941e22bee78STejun Heo goto repeat; 29421da177e4SLinus Torvalds } 29431da177e4SLinus Torvalds 2944fca839c0STejun Heo /** 2945fca839c0STejun Heo * check_flush_dependency - check for flush dependency sanity 2946fca839c0STejun Heo * @target_wq: workqueue being flushed 2947fca839c0STejun Heo * @target_work: work item being flushed (NULL for workqueue flushes) 2948fca839c0STejun Heo * 2949fca839c0STejun Heo * %current is trying to flush the whole @target_wq or @target_work on it. 2950fca839c0STejun Heo * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not 2951fca839c0STejun Heo * reclaiming memory or running on a workqueue which doesn't have 2952fca839c0STejun Heo * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to 2953fca839c0STejun Heo * a deadlock. 2954fca839c0STejun Heo */ 2955fca839c0STejun Heo static void check_flush_dependency(struct workqueue_struct *target_wq, 2956fca839c0STejun Heo struct work_struct *target_work) 2957fca839c0STejun Heo { 2958fca839c0STejun Heo work_func_t target_func = target_work ? 
target_work->func : NULL; 2959fca839c0STejun Heo struct worker *worker; 2960fca839c0STejun Heo 2961fca839c0STejun Heo if (target_wq->flags & WQ_MEM_RECLAIM) 2962fca839c0STejun Heo return; 2963fca839c0STejun Heo 2964fca839c0STejun Heo worker = current_wq_worker(); 2965fca839c0STejun Heo 2966fca839c0STejun Heo WARN_ONCE(current->flags & PF_MEMALLOC, 2967d75f773cSSakari Ailus "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps", 2968fca839c0STejun Heo current->pid, current->comm, target_wq->name, target_func); 296923d11a58STejun Heo WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 297023d11a58STejun Heo (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 2971d75f773cSSakari Ailus "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps", 2972fca839c0STejun Heo worker->current_pwq->wq->name, worker->current_func, 2973fca839c0STejun Heo target_wq->name, target_func); 2974fca839c0STejun Heo } 2975fca839c0STejun Heo 2976fc2e4d70SOleg Nesterov struct wq_barrier { 2977fc2e4d70SOleg Nesterov struct work_struct work; 2978fc2e4d70SOleg Nesterov struct completion done; 29792607d7a6STejun Heo struct task_struct *task; /* purely informational */ 2980fc2e4d70SOleg Nesterov }; 2981fc2e4d70SOleg Nesterov 2982fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work) 2983fc2e4d70SOleg Nesterov { 2984fc2e4d70SOleg Nesterov struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2985fc2e4d70SOleg Nesterov complete(&barr->done); 2986fc2e4d70SOleg Nesterov } 2987fc2e4d70SOleg Nesterov 29884690c4abSTejun Heo /** 29894690c4abSTejun Heo * insert_wq_barrier - insert a barrier work 2990112202d9STejun Heo * @pwq: pwq to insert barrier into 29914690c4abSTejun Heo * @barr: wq_barrier to insert 2992affee4b2STejun Heo * @target: target work to attach @barr to 2993affee4b2STejun Heo * @worker: worker currently executing @target, NULL if @target is not executing 29944690c4abSTejun Heo * 2995affee4b2STejun Heo * @barr is linked to @target such that @barr is completed only after 2996affee4b2STejun Heo * @target finishes execution. Please note that the ordering 2997affee4b2STejun Heo * guarantee is observed only with respect to @target and on the local 2998affee4b2STejun Heo * cpu. 2999affee4b2STejun Heo * 3000affee4b2STejun Heo * Currently, a queued barrier can't be canceled. This is because 3001affee4b2STejun Heo * try_to_grab_pending() can't determine whether the work to be 3002affee4b2STejun Heo * grabbed is at the head of the queue and thus can't clear LINKED 3003affee4b2STejun Heo * flag of the previous work while there must be a valid next work 3004affee4b2STejun Heo * after a work with LINKED flag set. 3005affee4b2STejun Heo * 3006affee4b2STejun Heo * Note that when @worker is non-NULL, @target may be modified 3007112202d9STejun Heo * underneath us, so we can't reliably determine pwq from @target. 30084690c4abSTejun Heo * 30094690c4abSTejun Heo * CONTEXT: 3010a9b8a985SSebastian Andrzej Siewior * raw_spin_lock_irq(pool->lock). 
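 *
 * Illustrative flow (editor's sketch; the in-tree usage is
 * start_flush_work()/__flush_work() further below): a flusher builds an
 * on-stack barrier and waits for it, roughly:
 *
 *	struct wq_barrier barr;
 *
 *	raw_spin_lock_irq(&pool->lock);
 *	insert_wq_barrier(pwq, &barr, target, worker);
 *	raw_spin_unlock_irq(&pool->lock);
 *	wait_for_completion(&barr.done);
 *	destroy_work_on_stack(&barr.work);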
30114690c4abSTejun Heo */ 3012112202d9STejun Heo static void insert_wq_barrier(struct pool_workqueue *pwq, 3013affee4b2STejun Heo struct wq_barrier *barr, 3014affee4b2STejun Heo struct work_struct *target, struct worker *worker) 3015fc2e4d70SOleg Nesterov { 3016d812796eSLai Jiangshan unsigned int work_flags = 0; 3017d812796eSLai Jiangshan unsigned int work_color; 3018affee4b2STejun Heo struct list_head *head; 3019affee4b2STejun Heo 3020dc186ad7SThomas Gleixner /* 3021d565ed63STejun Heo * debugobject calls are safe here even with pool->lock locked 3022dc186ad7SThomas Gleixner * as we know for sure that this will not trigger any of the 3023dc186ad7SThomas Gleixner * checks and call back into the fixup functions where we 3024dc186ad7SThomas Gleixner * might deadlock. 3025dc186ad7SThomas Gleixner */ 3026ca1cab37SAndrew Morton INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 302722df02bbSTejun Heo __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 302852fa5bc5SBoqun Feng 3029fd1a5b04SByungchul Park init_completion_map(&barr->done, &target->lockdep_map); 3030fd1a5b04SByungchul Park 30312607d7a6STejun Heo barr->task = current; 303283c22520SOleg Nesterov 30336741dd3fSGreg Kroah-Hartman /* The barrier work item does not participate in pwq->nr_active. */ 3034018f3a13SLai Jiangshan work_flags |= WORK_STRUCT_INACTIVE; 3035018f3a13SLai Jiangshan 3036affee4b2STejun Heo /* 3037affee4b2STejun Heo * If @target is currently being executed, schedule the 3038affee4b2STejun Heo * barrier to the worker; otherwise, put it after @target. 3039affee4b2STejun Heo */ 3040d812796eSLai Jiangshan if (worker) { 3041affee4b2STejun Heo head = worker->scheduled.next; 3042d812796eSLai Jiangshan work_color = worker->current_color; 3043d812796eSLai Jiangshan } else { 3044affee4b2STejun Heo unsigned long *bits = work_data_bits(target); 3045affee4b2STejun Heo 3046affee4b2STejun Heo head = target->entry.next; 3047affee4b2STejun Heo /* there can already be other linked works, inherit and set */ 3048d21cece0SLai Jiangshan work_flags |= *bits & WORK_STRUCT_LINKED; 3049d812796eSLai Jiangshan work_color = get_work_color(*bits); 3050affee4b2STejun Heo __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3051affee4b2STejun Heo } 3052affee4b2STejun Heo 3053d812796eSLai Jiangshan pwq->nr_in_flight[work_color]++; 3054d812796eSLai Jiangshan work_flags |= work_color_to_flags(work_color); 3055d812796eSLai Jiangshan 3056d21cece0SLai Jiangshan insert_work(pwq, &barr->work, head, work_flags); 3057fc2e4d70SOleg Nesterov } 3058fc2e4d70SOleg Nesterov 305973f53c4aSTejun Heo /** 3060112202d9STejun Heo * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 306173f53c4aSTejun Heo * @wq: workqueue being flushed 306273f53c4aSTejun Heo * @flush_color: new flush color, < 0 for no-op 306373f53c4aSTejun Heo * @work_color: new work color, < 0 for no-op 306473f53c4aSTejun Heo * 3065112202d9STejun Heo * Prepare pwqs for workqueue flushing. 306673f53c4aSTejun Heo * 3067112202d9STejun Heo * If @flush_color is non-negative, flush_color on all pwqs should be 3068112202d9STejun Heo * -1. If no pwq has in-flight commands at the specified color, all 3069112202d9STejun Heo * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3070112202d9STejun Heo * has in flight commands, its pwq->flush_color is set to 3071112202d9STejun Heo * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 307273f53c4aSTejun Heo * wakeup logic is armed and %true is returned. 
307373f53c4aSTejun Heo * 307473f53c4aSTejun Heo * The caller should have initialized @wq->first_flusher prior to 307573f53c4aSTejun Heo * calling this function with non-negative @flush_color. If 307673f53c4aSTejun Heo * @flush_color is negative, no flush color update is done and %false 307773f53c4aSTejun Heo * is returned. 307873f53c4aSTejun Heo * 3079112202d9STejun Heo * If @work_color is non-negative, all pwqs should have the same 308073f53c4aSTejun Heo * work_color which is previous to @work_color and all will be 308173f53c4aSTejun Heo * advanced to @work_color. 308273f53c4aSTejun Heo * 308373f53c4aSTejun Heo * CONTEXT: 30843c25a55dSLai Jiangshan * mutex_lock(wq->mutex). 308573f53c4aSTejun Heo * 3086d185af30SYacine Belkadi * Return: 308773f53c4aSTejun Heo * %true if @flush_color >= 0 and there's something to flush. %false 308873f53c4aSTejun Heo * otherwise. 308973f53c4aSTejun Heo */ 3090112202d9STejun Heo static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 309173f53c4aSTejun Heo int flush_color, int work_color) 30921da177e4SLinus Torvalds { 309373f53c4aSTejun Heo bool wait = false; 309449e3cf44STejun Heo struct pool_workqueue *pwq; 30951da177e4SLinus Torvalds 309673f53c4aSTejun Heo if (flush_color >= 0) { 30976183c009STejun Heo WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3098112202d9STejun Heo atomic_set(&wq->nr_pwqs_to_flush, 1); 3099dc186ad7SThomas Gleixner } 310014441960SOleg Nesterov 310149e3cf44STejun Heo for_each_pwq(pwq, wq) { 3102112202d9STejun Heo struct worker_pool *pool = pwq->pool; 31031da177e4SLinus Torvalds 3104a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 310573f53c4aSTejun Heo 310673f53c4aSTejun Heo if (flush_color >= 0) { 31076183c009STejun Heo WARN_ON_ONCE(pwq->flush_color != -1); 310873f53c4aSTejun Heo 3109112202d9STejun Heo if (pwq->nr_in_flight[flush_color]) { 3110112202d9STejun Heo pwq->flush_color = flush_color; 3111112202d9STejun Heo atomic_inc(&wq->nr_pwqs_to_flush); 311273f53c4aSTejun Heo wait = true; 31131da177e4SLinus Torvalds } 311473f53c4aSTejun Heo } 311573f53c4aSTejun Heo 311673f53c4aSTejun Heo if (work_color >= 0) { 31176183c009STejun Heo WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3118112202d9STejun Heo pwq->work_color = work_color; 311973f53c4aSTejun Heo } 312073f53c4aSTejun Heo 3121a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 31221da177e4SLinus Torvalds } 31231da177e4SLinus Torvalds 3124112202d9STejun Heo if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 312573f53c4aSTejun Heo complete(&wq->first_flusher->done); 312673f53c4aSTejun Heo 312773f53c4aSTejun Heo return wait; 312883c22520SOleg Nesterov } 31291da177e4SLinus Torvalds 31300fcb78c2SRolf Eike Beer /** 3131c4f135d6STetsuo Handa * __flush_workqueue - ensure that any scheduled work has run to completion. 31320fcb78c2SRolf Eike Beer * @wq: workqueue to flush 31331da177e4SLinus Torvalds * 3134c5aa87bbSTejun Heo * This function sleeps until all work items which were queued on entry 3135c5aa87bbSTejun Heo * have finished execution, but it is not livelocked by new incoming ones. 
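 *
 * Illustrative usage (editor's sketch; my_wq and my_work are made-up
 * names, and callers normally go through the flush_workqueue() wrapper):
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);
 *
 * After the flush returns, my_work and everything else queued on my_wq
 * before the call has finished executing; items queued afterwards may
 * still be pending or running.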
31361da177e4SLinus Torvalds */ 3137c4f135d6STetsuo Handa void __flush_workqueue(struct workqueue_struct *wq) 31381da177e4SLinus Torvalds { 313973f53c4aSTejun Heo struct wq_flusher this_flusher = { 314073f53c4aSTejun Heo .list = LIST_HEAD_INIT(this_flusher.list), 314173f53c4aSTejun Heo .flush_color = -1, 3142fd1a5b04SByungchul Park .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), 314373f53c4aSTejun Heo }; 314473f53c4aSTejun Heo int next_color; 3145b1f4ec17SOleg Nesterov 31463347fa09STejun Heo if (WARN_ON(!wq_online)) 31473347fa09STejun Heo return; 31483347fa09STejun Heo 314987915adcSJohannes Berg lock_map_acquire(&wq->lockdep_map); 315087915adcSJohannes Berg lock_map_release(&wq->lockdep_map); 315187915adcSJohannes Berg 31523c25a55dSLai Jiangshan mutex_lock(&wq->mutex); 315373f53c4aSTejun Heo 315473f53c4aSTejun Heo /* 315573f53c4aSTejun Heo * Start-to-wait phase 315673f53c4aSTejun Heo */ 315773f53c4aSTejun Heo next_color = work_next_color(wq->work_color); 315873f53c4aSTejun Heo 315973f53c4aSTejun Heo if (next_color != wq->flush_color) { 316073f53c4aSTejun Heo /* 316173f53c4aSTejun Heo * Color space is not full. The current work_color 316273f53c4aSTejun Heo * becomes our flush_color and work_color is advanced 316373f53c4aSTejun Heo * by one. 316473f53c4aSTejun Heo */ 31656183c009STejun Heo WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 316673f53c4aSTejun Heo this_flusher.flush_color = wq->work_color; 316773f53c4aSTejun Heo wq->work_color = next_color; 316873f53c4aSTejun Heo 316973f53c4aSTejun Heo if (!wq->first_flusher) { 317073f53c4aSTejun Heo /* no flush in progress, become the first flusher */ 31716183c009STejun Heo WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 317273f53c4aSTejun Heo 317373f53c4aSTejun Heo wq->first_flusher = &this_flusher; 317473f53c4aSTejun Heo 3175112202d9STejun Heo if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 317673f53c4aSTejun Heo wq->work_color)) { 317773f53c4aSTejun Heo /* nothing to flush, done */ 317873f53c4aSTejun Heo wq->flush_color = next_color; 317973f53c4aSTejun Heo wq->first_flusher = NULL; 318073f53c4aSTejun Heo goto out_unlock; 318173f53c4aSTejun Heo } 318273f53c4aSTejun Heo } else { 318373f53c4aSTejun Heo /* wait in queue */ 31846183c009STejun Heo WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 318573f53c4aSTejun Heo list_add_tail(&this_flusher.list, &wq->flusher_queue); 3186112202d9STejun Heo flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 318773f53c4aSTejun Heo } 318873f53c4aSTejun Heo } else { 318973f53c4aSTejun Heo /* 319073f53c4aSTejun Heo * Oops, color space is full, wait on overflow queue. 319173f53c4aSTejun Heo * The next flush completion will assign us 319273f53c4aSTejun Heo * flush_color and transfer to flusher_queue. 319373f53c4aSTejun Heo */ 319473f53c4aSTejun Heo list_add_tail(&this_flusher.list, &wq->flusher_overflow); 319573f53c4aSTejun Heo } 319673f53c4aSTejun Heo 3197fca839c0STejun Heo check_flush_dependency(wq, NULL); 3198fca839c0STejun Heo 31993c25a55dSLai Jiangshan mutex_unlock(&wq->mutex); 320073f53c4aSTejun Heo 320173f53c4aSTejun Heo wait_for_completion(&this_flusher.done); 320273f53c4aSTejun Heo 320373f53c4aSTejun Heo /* 320473f53c4aSTejun Heo * Wake-up-and-cascade phase 320573f53c4aSTejun Heo * 320673f53c4aSTejun Heo * First flushers are responsible for cascading flushes and 320773f53c4aSTejun Heo * handling overflow. Non-first flushers can simply return. 
320873f53c4aSTejun Heo */ 320900d5d15bSChris Wilson if (READ_ONCE(wq->first_flusher) != &this_flusher) 321073f53c4aSTejun Heo return; 321173f53c4aSTejun Heo 32123c25a55dSLai Jiangshan mutex_lock(&wq->mutex); 321373f53c4aSTejun Heo 32144ce48b37STejun Heo /* we might have raced, check again with mutex held */ 32154ce48b37STejun Heo if (wq->first_flusher != &this_flusher) 32164ce48b37STejun Heo goto out_unlock; 32174ce48b37STejun Heo 321800d5d15bSChris Wilson WRITE_ONCE(wq->first_flusher, NULL); 321973f53c4aSTejun Heo 32206183c009STejun Heo WARN_ON_ONCE(!list_empty(&this_flusher.list)); 32216183c009STejun Heo WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 322273f53c4aSTejun Heo 322373f53c4aSTejun Heo while (true) { 322473f53c4aSTejun Heo struct wq_flusher *next, *tmp; 322573f53c4aSTejun Heo 322673f53c4aSTejun Heo /* complete all the flushers sharing the current flush color */ 322773f53c4aSTejun Heo list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 322873f53c4aSTejun Heo if (next->flush_color != wq->flush_color) 322973f53c4aSTejun Heo break; 323073f53c4aSTejun Heo list_del_init(&next->list); 323173f53c4aSTejun Heo complete(&next->done); 323273f53c4aSTejun Heo } 323373f53c4aSTejun Heo 32346183c009STejun Heo WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 323573f53c4aSTejun Heo wq->flush_color != work_next_color(wq->work_color)); 323673f53c4aSTejun Heo 323773f53c4aSTejun Heo /* this flush_color is finished, advance by one */ 323873f53c4aSTejun Heo wq->flush_color = work_next_color(wq->flush_color); 323973f53c4aSTejun Heo 324073f53c4aSTejun Heo /* one color has been freed, handle overflow queue */ 324173f53c4aSTejun Heo if (!list_empty(&wq->flusher_overflow)) { 324273f53c4aSTejun Heo /* 324373f53c4aSTejun Heo * Assign the same color to all overflowed 324473f53c4aSTejun Heo * flushers, advance work_color and append to 324573f53c4aSTejun Heo * flusher_queue. This is the start-to-wait 324673f53c4aSTejun Heo * phase for these overflowed flushers. 324773f53c4aSTejun Heo */ 324873f53c4aSTejun Heo list_for_each_entry(tmp, &wq->flusher_overflow, list) 324973f53c4aSTejun Heo tmp->flush_color = wq->work_color; 325073f53c4aSTejun Heo 325173f53c4aSTejun Heo wq->work_color = work_next_color(wq->work_color); 325273f53c4aSTejun Heo 325373f53c4aSTejun Heo list_splice_tail_init(&wq->flusher_overflow, 325473f53c4aSTejun Heo &wq->flusher_queue); 3255112202d9STejun Heo flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 325673f53c4aSTejun Heo } 325773f53c4aSTejun Heo 325873f53c4aSTejun Heo if (list_empty(&wq->flusher_queue)) { 32596183c009STejun Heo WARN_ON_ONCE(wq->flush_color != wq->work_color); 326073f53c4aSTejun Heo break; 326173f53c4aSTejun Heo } 326273f53c4aSTejun Heo 326373f53c4aSTejun Heo /* 326473f53c4aSTejun Heo * Need to flush more colors. Make the next flusher 3265112202d9STejun Heo * the new first flusher and arm pwqs. 326673f53c4aSTejun Heo */ 32676183c009STejun Heo WARN_ON_ONCE(wq->flush_color == wq->work_color); 32686183c009STejun Heo WARN_ON_ONCE(wq->flush_color != next->flush_color); 326973f53c4aSTejun Heo 327073f53c4aSTejun Heo list_del_init(&next->list); 327173f53c4aSTejun Heo wq->first_flusher = next; 327273f53c4aSTejun Heo 3273112202d9STejun Heo if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 327473f53c4aSTejun Heo break; 327573f53c4aSTejun Heo 327673f53c4aSTejun Heo /* 327773f53c4aSTejun Heo * Meh... this color is already done, clear first 327873f53c4aSTejun Heo * flusher and repeat cascading. 
327973f53c4aSTejun Heo */ 328073f53c4aSTejun Heo wq->first_flusher = NULL; 328173f53c4aSTejun Heo } 328273f53c4aSTejun Heo 328373f53c4aSTejun Heo out_unlock: 32843c25a55dSLai Jiangshan mutex_unlock(&wq->mutex); 32851da177e4SLinus Torvalds } 3286c4f135d6STetsuo Handa EXPORT_SYMBOL(__flush_workqueue); 32871da177e4SLinus Torvalds 32889c5a2ba7STejun Heo /** 32899c5a2ba7STejun Heo * drain_workqueue - drain a workqueue 32909c5a2ba7STejun Heo * @wq: workqueue to drain 32919c5a2ba7STejun Heo * 32929c5a2ba7STejun Heo * Wait until the workqueue becomes empty. While draining is in progress, 32939c5a2ba7STejun Heo * only chain queueing is allowed. IOW, only currently pending or running 32949c5a2ba7STejun Heo * work items on @wq can queue further work items on it. @wq is flushed 3295b749b1b6SChen Hanxiao * repeatedly until it becomes empty. The number of flushing is determined 32969c5a2ba7STejun Heo * by the depth of chaining and should be relatively short. Whine if it 32979c5a2ba7STejun Heo * takes too long. 32989c5a2ba7STejun Heo */ 32999c5a2ba7STejun Heo void drain_workqueue(struct workqueue_struct *wq) 33009c5a2ba7STejun Heo { 33019c5a2ba7STejun Heo unsigned int flush_cnt = 0; 330249e3cf44STejun Heo struct pool_workqueue *pwq; 33039c5a2ba7STejun Heo 33049c5a2ba7STejun Heo /* 33059c5a2ba7STejun Heo * __queue_work() needs to test whether there are drainers, is much 33069c5a2ba7STejun Heo * hotter than drain_workqueue() and already looks at @wq->flags. 3307618b01ebSTejun Heo * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 33089c5a2ba7STejun Heo */ 330987fc741eSLai Jiangshan mutex_lock(&wq->mutex); 33109c5a2ba7STejun Heo if (!wq->nr_drainers++) 3311618b01ebSTejun Heo wq->flags |= __WQ_DRAINING; 331287fc741eSLai Jiangshan mutex_unlock(&wq->mutex); 33139c5a2ba7STejun Heo reflush: 3314c4f135d6STetsuo Handa __flush_workqueue(wq); 33159c5a2ba7STejun Heo 3316b09f4fd3SLai Jiangshan mutex_lock(&wq->mutex); 331776af4d93STejun Heo 331849e3cf44STejun Heo for_each_pwq(pwq, wq) { 3319fa2563e4SThomas Tuttle bool drained; 33209c5a2ba7STejun Heo 3321a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pwq->pool->lock); 332235bf38ddSGreg Kroah-Hartman drained = !pwq->nr_active && list_empty(&pwq->inactive_works); 3323a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pwq->pool->lock); 3324fa2563e4SThomas Tuttle 3325fa2563e4SThomas Tuttle if (drained) 33269c5a2ba7STejun Heo continue; 33279c5a2ba7STejun Heo 33289c5a2ba7STejun Heo if (++flush_cnt == 10 || 33299c5a2ba7STejun Heo (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 3330e9ad2eb3SStephen Zhang pr_warn("workqueue %s: %s() isn't complete after %u tries\n", 3331e9ad2eb3SStephen Zhang wq->name, __func__, flush_cnt); 333276af4d93STejun Heo 3333b09f4fd3SLai Jiangshan mutex_unlock(&wq->mutex); 33349c5a2ba7STejun Heo goto reflush; 33359c5a2ba7STejun Heo } 33369c5a2ba7STejun Heo 33379c5a2ba7STejun Heo if (!--wq->nr_drainers) 3338618b01ebSTejun Heo wq->flags &= ~__WQ_DRAINING; 333987fc741eSLai Jiangshan mutex_unlock(&wq->mutex); 33409c5a2ba7STejun Heo } 33419c5a2ba7STejun Heo EXPORT_SYMBOL_GPL(drain_workqueue); 33429c5a2ba7STejun Heo 3343d6e89786SJohannes Berg static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 3344d6e89786SJohannes Berg bool from_cancel) 3345baf59022STejun Heo { 3346baf59022STejun Heo struct worker *worker = NULL; 3347c9e7cf27STejun Heo struct worker_pool *pool; 3348112202d9STejun Heo struct pool_workqueue *pwq; 3349baf59022STejun Heo 3350baf59022STejun Heo might_sleep(); 3351baf59022STejun Heo 
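	/*
	 * Look up the pool @work was last associated with and, if @work is
	 * still queued or running there, insert a barrier work right after
	 * it so that the caller can wait on @barr->done.  Returns %false
	 * when @work is already idle and there is nothing to wait for.
	 */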
335224acfb71SThomas Gleixner rcu_read_lock(); 3353fa1b54e6STejun Heo pool = get_work_pool(work); 3354fa1b54e6STejun Heo if (!pool) { 335524acfb71SThomas Gleixner rcu_read_unlock(); 3356fa1b54e6STejun Heo return false; 3357fa1b54e6STejun Heo } 3358fa1b54e6STejun Heo 3359a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 33600b3dae68SLai Jiangshan /* see the comment in try_to_grab_pending() with the same code */ 3361112202d9STejun Heo pwq = get_work_pwq(work); 3362112202d9STejun Heo if (pwq) { 3363112202d9STejun Heo if (unlikely(pwq->pool != pool)) 3364baf59022STejun Heo goto already_gone; 3365606a5020STejun Heo } else { 3366c9e7cf27STejun Heo worker = find_worker_executing_work(pool, work); 3367baf59022STejun Heo if (!worker) 3368baf59022STejun Heo goto already_gone; 3369112202d9STejun Heo pwq = worker->current_pwq; 3370606a5020STejun Heo } 3371baf59022STejun Heo 3372fca839c0STejun Heo check_flush_dependency(pwq->wq, work); 3373fca839c0STejun Heo 3374112202d9STejun Heo insert_wq_barrier(pwq, barr, work, worker); 3375a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 3376baf59022STejun Heo 3377e159489bSTejun Heo /* 3378a1d14934SPeter Zijlstra * Force a lock recursion deadlock when using flush_work() inside a 3379a1d14934SPeter Zijlstra * single-threaded or rescuer equipped workqueue. 3380a1d14934SPeter Zijlstra * 3381a1d14934SPeter Zijlstra * For single threaded workqueues the deadlock happens when the work 3382a1d14934SPeter Zijlstra * is after the work issuing the flush_work(). For rescuer equipped 3383a1d14934SPeter Zijlstra * workqueues the deadlock happens when the rescuer stalls, blocking 3384a1d14934SPeter Zijlstra * forward progress. 3385e159489bSTejun Heo */ 3386d6e89786SJohannes Berg if (!from_cancel && 3387d6e89786SJohannes Berg (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { 3388112202d9STejun Heo lock_map_acquire(&pwq->wq->lockdep_map); 3389112202d9STejun Heo lock_map_release(&pwq->wq->lockdep_map); 3390a1d14934SPeter Zijlstra } 339124acfb71SThomas Gleixner rcu_read_unlock(); 3392baf59022STejun Heo return true; 3393baf59022STejun Heo already_gone: 3394a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 339524acfb71SThomas Gleixner rcu_read_unlock(); 3396baf59022STejun Heo return false; 3397baf59022STejun Heo } 3398baf59022STejun Heo 3399d6e89786SJohannes Berg static bool __flush_work(struct work_struct *work, bool from_cancel) 3400d6e89786SJohannes Berg { 3401d6e89786SJohannes Berg struct wq_barrier barr; 3402d6e89786SJohannes Berg 3403d6e89786SJohannes Berg if (WARN_ON(!wq_online)) 3404d6e89786SJohannes Berg return false; 3405d6e89786SJohannes Berg 34064d43d395STetsuo Handa if (WARN_ON(!work->func)) 34074d43d395STetsuo Handa return false; 34084d43d395STetsuo Handa 340987915adcSJohannes Berg lock_map_acquire(&work->lockdep_map); 341087915adcSJohannes Berg lock_map_release(&work->lockdep_map); 341187915adcSJohannes Berg 3412d6e89786SJohannes Berg if (start_flush_work(work, &barr, from_cancel)) { 3413d6e89786SJohannes Berg wait_for_completion(&barr.done); 3414d6e89786SJohannes Berg destroy_work_on_stack(&barr.work); 3415d6e89786SJohannes Berg return true; 3416d6e89786SJohannes Berg } else { 3417d6e89786SJohannes Berg return false; 3418d6e89786SJohannes Berg } 3419d6e89786SJohannes Berg } 3420d6e89786SJohannes Berg 3421db700897SOleg Nesterov /** 3422401a8d04STejun Heo * flush_work - wait for a work to finish executing the last queueing instance 3423401a8d04STejun Heo * @work: the work to flush 3424db700897SOleg Nesterov * 
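 * A minimal usage sketch (illustrative only; my_obj and my_work_fn are
 * made-up names):
 *
 *	INIT_WORK(&my_obj->work, my_work_fn);
 *	queue_work(system_wq, &my_obj->work);
 *	...
 *	flush_work(&my_obj->work);
 *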
3425606a5020STejun Heo * Wait until @work has finished execution. @work is guaranteed to be idle 3426606a5020STejun Heo * on return if it hasn't been requeued since flush started. 3427401a8d04STejun Heo * 3428d185af30SYacine Belkadi * Return: 3429401a8d04STejun Heo * %true if flush_work() waited for the work to finish execution, 3430401a8d04STejun Heo * %false if it was already idle. 3431db700897SOleg Nesterov */ 3432401a8d04STejun Heo bool flush_work(struct work_struct *work) 3433db700897SOleg Nesterov { 3434d6e89786SJohannes Berg return __flush_work(work, false); 3435606a5020STejun Heo } 3436db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work); 3437db700897SOleg Nesterov 34388603e1b3STejun Heo struct cwt_wait { 3439ac6424b9SIngo Molnar wait_queue_entry_t wait; 34408603e1b3STejun Heo struct work_struct *work; 34418603e1b3STejun Heo }; 34428603e1b3STejun Heo 3443ac6424b9SIngo Molnar static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 34448603e1b3STejun Heo { 34458603e1b3STejun Heo struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 34468603e1b3STejun Heo 34478603e1b3STejun Heo if (cwait->work != key) 34488603e1b3STejun Heo return 0; 34498603e1b3STejun Heo return autoremove_wake_function(wait, mode, sync, key); 34508603e1b3STejun Heo } 34518603e1b3STejun Heo 345236e227d2STejun Heo static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 3453401a8d04STejun Heo { 34548603e1b3STejun Heo static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); 3455bbb68dfaSTejun Heo unsigned long flags; 34561f1f642eSOleg Nesterov int ret; 34571f1f642eSOleg Nesterov 34581f1f642eSOleg Nesterov do { 3459bbb68dfaSTejun Heo ret = try_to_grab_pending(work, is_dwork, &flags); 3460bbb68dfaSTejun Heo /* 34618603e1b3STejun Heo * If someone else is already canceling, wait for it to 34628603e1b3STejun Heo * finish. flush_work() doesn't work for PREEMPT_NONE 34638603e1b3STejun Heo * because we may get scheduled between @work's completion 34648603e1b3STejun Heo * and the other canceling task resuming and clearing 34658603e1b3STejun Heo * CANCELING - flush_work() will return false immediately 34668603e1b3STejun Heo * as @work is no longer busy, try_to_grab_pending() will 34678603e1b3STejun Heo * return -ENOENT as @work is still being canceled and the 34688603e1b3STejun Heo * other canceling task won't be able to clear CANCELING as 34698603e1b3STejun Heo * we're hogging the CPU. 34708603e1b3STejun Heo * 34718603e1b3STejun Heo * Let's wait for completion using a waitqueue. As this 34728603e1b3STejun Heo * may lead to the thundering herd problem, use a custom 34738603e1b3STejun Heo * wake function which matches @work along with exclusive 34748603e1b3STejun Heo * wait and wakeup. 
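 *
 * cwt_wakefn() above only wakes waiters whose cwait->work matches the
 * @work passed to __wake_up(), so waiters canceling unrelated work
 * items are left alone.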
3475bbb68dfaSTejun Heo */ 34768603e1b3STejun Heo if (unlikely(ret == -ENOENT)) { 34778603e1b3STejun Heo struct cwt_wait cwait; 34788603e1b3STejun Heo 34798603e1b3STejun Heo init_wait(&cwait.wait); 34808603e1b3STejun Heo cwait.wait.func = cwt_wakefn; 34818603e1b3STejun Heo cwait.work = work; 34828603e1b3STejun Heo 34838603e1b3STejun Heo prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 34848603e1b3STejun Heo TASK_UNINTERRUPTIBLE); 34858603e1b3STejun Heo if (work_is_canceling(work)) 34868603e1b3STejun Heo schedule(); 34878603e1b3STejun Heo finish_wait(&cancel_waitq, &cwait.wait); 34888603e1b3STejun Heo } 34891f1f642eSOleg Nesterov } while (unlikely(ret < 0)); 34901f1f642eSOleg Nesterov 3491bbb68dfaSTejun Heo /* tell other tasks trying to grab @work to back off */ 3492bbb68dfaSTejun Heo mark_work_canceling(work); 3493bbb68dfaSTejun Heo local_irq_restore(flags); 3494bbb68dfaSTejun Heo 34953347fa09STejun Heo /* 34963347fa09STejun Heo * This allows canceling during early boot. We know that @work 34973347fa09STejun Heo * isn't executing. 34983347fa09STejun Heo */ 34993347fa09STejun Heo if (wq_online) 3500d6e89786SJohannes Berg __flush_work(work, true); 35013347fa09STejun Heo 35027a22ad75STejun Heo clear_work_data(work); 35038603e1b3STejun Heo 35048603e1b3STejun Heo /* 35058603e1b3STejun Heo * Paired with prepare_to_wait() above so that either 35068603e1b3STejun Heo * waitqueue_active() is visible here or !work_is_canceling() is 35078603e1b3STejun Heo * visible there. 35088603e1b3STejun Heo */ 35098603e1b3STejun Heo smp_mb(); 35108603e1b3STejun Heo if (waitqueue_active(&cancel_waitq)) 35118603e1b3STejun Heo __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 35128603e1b3STejun Heo 35131f1f642eSOleg Nesterov return ret; 35141f1f642eSOleg Nesterov } 35151f1f642eSOleg Nesterov 35166e84d644SOleg Nesterov /** 3517401a8d04STejun Heo * cancel_work_sync - cancel a work and wait for it to finish 3518401a8d04STejun Heo * @work: the work to cancel 35196e84d644SOleg Nesterov * 3520401a8d04STejun Heo * Cancel @work and wait for its execution to finish. This function 3521401a8d04STejun Heo * can be used even if the work re-queues itself or migrates to 3522401a8d04STejun Heo * another workqueue. On return from this function, @work is 3523401a8d04STejun Heo * guaranteed to be not pending or executing on any CPU. 35241f1f642eSOleg Nesterov * 3525401a8d04STejun Heo * cancel_work_sync(&delayed_work->work) must not be used for 3526401a8d04STejun Heo * delayed_work's. Use cancel_delayed_work_sync() instead. 35276e84d644SOleg Nesterov * 3528401a8d04STejun Heo * The caller must ensure that the workqueue on which @work was last 35296e84d644SOleg Nesterov * queued can't be destroyed before this function returns. 3530401a8d04STejun Heo * 3531d185af30SYacine Belkadi * Return: 3532401a8d04STejun Heo * %true if @work was pending, %false otherwise. 35336e84d644SOleg Nesterov */ 3534401a8d04STejun Heo bool cancel_work_sync(struct work_struct *work) 35356e84d644SOleg Nesterov { 353636e227d2STejun Heo return __cancel_work_timer(work, false); 3537b89deed3SOleg Nesterov } 353828e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync); 3539b89deed3SOleg Nesterov 35406e84d644SOleg Nesterov /** 3541401a8d04STejun Heo * flush_delayed_work - wait for a dwork to finish executing the last queueing 3542401a8d04STejun Heo * @dwork: the delayed work to flush 35436e84d644SOleg Nesterov * 3544401a8d04STejun Heo * Delayed timer is cancelled and the pending work is queued for 3545401a8d04STejun Heo * immediate execution. 
Like flush_work(), this function only 3546401a8d04STejun Heo * considers the last queueing instance of @dwork. 35471f1f642eSOleg Nesterov * 3548d185af30SYacine Belkadi * Return: 3549401a8d04STejun Heo * %true if flush_work() waited for the work to finish execution, 3550401a8d04STejun Heo * %false if it was already idle. 35516e84d644SOleg Nesterov */ 3552401a8d04STejun Heo bool flush_delayed_work(struct delayed_work *dwork) 3553401a8d04STejun Heo { 35548930cabaSTejun Heo local_irq_disable(); 3555401a8d04STejun Heo if (del_timer_sync(&dwork->timer)) 355660c057bcSLai Jiangshan __queue_work(dwork->cpu, dwork->wq, &dwork->work); 35578930cabaSTejun Heo local_irq_enable(); 3558401a8d04STejun Heo return flush_work(&dwork->work); 3559401a8d04STejun Heo } 3560401a8d04STejun Heo EXPORT_SYMBOL(flush_delayed_work); 3561401a8d04STejun Heo 356205f0fe6bSTejun Heo /** 356305f0fe6bSTejun Heo * flush_rcu_work - wait for a rwork to finish executing the last queueing 356405f0fe6bSTejun Heo * @rwork: the rcu work to flush 356505f0fe6bSTejun Heo * 356605f0fe6bSTejun Heo * Return: 356705f0fe6bSTejun Heo * %true if flush_rcu_work() waited for the work to finish execution, 356805f0fe6bSTejun Heo * %false if it was already idle. 356905f0fe6bSTejun Heo */ 357005f0fe6bSTejun Heo bool flush_rcu_work(struct rcu_work *rwork) 357105f0fe6bSTejun Heo { 357205f0fe6bSTejun Heo if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { 357305f0fe6bSTejun Heo rcu_barrier(); 357405f0fe6bSTejun Heo flush_work(&rwork->work); 357505f0fe6bSTejun Heo return true; 357605f0fe6bSTejun Heo } else { 357705f0fe6bSTejun Heo return flush_work(&rwork->work); 357805f0fe6bSTejun Heo } 357905f0fe6bSTejun Heo } 358005f0fe6bSTejun Heo EXPORT_SYMBOL(flush_rcu_work); 358105f0fe6bSTejun Heo 3582f72b8792SJens Axboe static bool __cancel_work(struct work_struct *work, bool is_dwork) 3583f72b8792SJens Axboe { 3584f72b8792SJens Axboe unsigned long flags; 3585f72b8792SJens Axboe int ret; 3586f72b8792SJens Axboe 3587f72b8792SJens Axboe do { 3588f72b8792SJens Axboe ret = try_to_grab_pending(work, is_dwork, &flags); 3589f72b8792SJens Axboe } while (unlikely(ret == -EAGAIN)); 3590f72b8792SJens Axboe 3591f72b8792SJens Axboe if (unlikely(ret < 0)) 3592f72b8792SJens Axboe return false; 3593f72b8792SJens Axboe 3594f72b8792SJens Axboe set_work_pool_and_clear_pending(work, get_work_pool_id(work)); 3595f72b8792SJens Axboe local_irq_restore(flags); 3596f72b8792SJens Axboe return ret; 3597f72b8792SJens Axboe } 3598f72b8792SJens Axboe 359973b4b532SAndrey Grodzovsky /* 360073b4b532SAndrey Grodzovsky * See cancel_delayed_work() 360173b4b532SAndrey Grodzovsky */ 360273b4b532SAndrey Grodzovsky bool cancel_work(struct work_struct *work) 360373b4b532SAndrey Grodzovsky { 360473b4b532SAndrey Grodzovsky return __cancel_work(work, false); 360573b4b532SAndrey Grodzovsky } 360673b4b532SAndrey Grodzovsky EXPORT_SYMBOL(cancel_work); 360773b4b532SAndrey Grodzovsky 3608401a8d04STejun Heo /** 360957b30ae7STejun Heo * cancel_delayed_work - cancel a delayed work 361057b30ae7STejun Heo * @dwork: delayed_work to cancel 361109383498STejun Heo * 3612d185af30SYacine Belkadi * Kill off a pending delayed_work. 3613d185af30SYacine Belkadi * 3614d185af30SYacine Belkadi * Return: %true if @dwork was pending and canceled; %false if it wasn't 3615d185af30SYacine Belkadi * pending. 
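 *
 * Illustrative sketch (dev->poll_dwork stands in for any delayed_work):
 *
 *	if (cancel_delayed_work(&dev->poll_dwork))
 *		pr_debug("pending poll cancelled before it ran\n");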
3616d185af30SYacine Belkadi * 3617d185af30SYacine Belkadi * Note: 3618d185af30SYacine Belkadi * The work callback function may still be running on return, unless 3619d185af30SYacine Belkadi * it returns %true and the work doesn't re-arm itself. Explicitly flush or 3620d185af30SYacine Belkadi * use cancel_delayed_work_sync() to wait on it. 362109383498STejun Heo * 362257b30ae7STejun Heo * This function is safe to call from any context including IRQ handler. 362309383498STejun Heo */ 362457b30ae7STejun Heo bool cancel_delayed_work(struct delayed_work *dwork) 362509383498STejun Heo { 3626f72b8792SJens Axboe return __cancel_work(&dwork->work, true); 362709383498STejun Heo } 362857b30ae7STejun Heo EXPORT_SYMBOL(cancel_delayed_work); 362909383498STejun Heo 363009383498STejun Heo /** 3631401a8d04STejun Heo * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 3632401a8d04STejun Heo * @dwork: the delayed work cancel 3633401a8d04STejun Heo * 3634401a8d04STejun Heo * This is cancel_work_sync() for delayed works. 3635401a8d04STejun Heo * 3636d185af30SYacine Belkadi * Return: 3637401a8d04STejun Heo * %true if @dwork was pending, %false otherwise. 3638401a8d04STejun Heo */ 3639401a8d04STejun Heo bool cancel_delayed_work_sync(struct delayed_work *dwork) 36406e84d644SOleg Nesterov { 364136e227d2STejun Heo return __cancel_work_timer(&dwork->work, true); 36426e84d644SOleg Nesterov } 3643f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync); 36441da177e4SLinus Torvalds 36450fcb78c2SRolf Eike Beer /** 364631ddd871STejun Heo * schedule_on_each_cpu - execute a function synchronously on each online CPU 3647b6136773SAndrew Morton * @func: the function to call 3648b6136773SAndrew Morton * 364931ddd871STejun Heo * schedule_on_each_cpu() executes @func on each online CPU using the 365031ddd871STejun Heo * system workqueue and blocks until all CPUs have completed. 3651b6136773SAndrew Morton * schedule_on_each_cpu() is very slow. 365231ddd871STejun Heo * 3653d185af30SYacine Belkadi * Return: 365431ddd871STejun Heo * 0 on success, -errno on failure. 
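 *
 * Usage sketch (drain_local_caches is a hypothetical work_func_t that
 * runs once on every online CPU, in process context):
 *
 *	ret = schedule_on_each_cpu(drain_local_caches);
 *	if (ret)
 *		pr_warn("per-cpu drain failed: %d\n", ret);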
3655b6136773SAndrew Morton */ 365665f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func) 365715316ba8SChristoph Lameter { 365815316ba8SChristoph Lameter int cpu; 365938f51568SNamhyung Kim struct work_struct __percpu *works; 366015316ba8SChristoph Lameter 3661b6136773SAndrew Morton works = alloc_percpu(struct work_struct); 3662b6136773SAndrew Morton if (!works) 366315316ba8SChristoph Lameter return -ENOMEM; 3664b6136773SAndrew Morton 3665ffd8bea8SSebastian Andrzej Siewior cpus_read_lock(); 366693981800STejun Heo 366715316ba8SChristoph Lameter for_each_online_cpu(cpu) { 36689bfb1839SIngo Molnar struct work_struct *work = per_cpu_ptr(works, cpu); 36699bfb1839SIngo Molnar 36709bfb1839SIngo Molnar INIT_WORK(work, func); 36718de6d308SOleg Nesterov schedule_work_on(cpu, work); 367215316ba8SChristoph Lameter } 367393981800STejun Heo 367493981800STejun Heo for_each_online_cpu(cpu) 36758616a89aSOleg Nesterov flush_work(per_cpu_ptr(works, cpu)); 367693981800STejun Heo 3677ffd8bea8SSebastian Andrzej Siewior cpus_read_unlock(); 3678b6136773SAndrew Morton free_percpu(works); 367915316ba8SChristoph Lameter return 0; 368015316ba8SChristoph Lameter } 368115316ba8SChristoph Lameter 3682eef6a7d5SAlan Stern /** 36831fa44ecaSJames Bottomley * execute_in_process_context - reliably execute the routine with user context 36841fa44ecaSJames Bottomley * @fn: the function to execute 36851fa44ecaSJames Bottomley * @ew: guaranteed storage for the execute work structure (must 36861fa44ecaSJames Bottomley * be available when the work executes) 36871fa44ecaSJames Bottomley * 36881fa44ecaSJames Bottomley * Executes the function immediately if process context is available, 36891fa44ecaSJames Bottomley * otherwise schedules the function for delayed execution. 36901fa44ecaSJames Bottomley * 3691d185af30SYacine Belkadi * Return: 0 - function was executed 36921fa44ecaSJames Bottomley * 1 - function was scheduled for execution 36931fa44ecaSJames Bottomley */ 369465f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew) 36951fa44ecaSJames Bottomley { 36961fa44ecaSJames Bottomley if (!in_interrupt()) { 369765f27f38SDavid Howells fn(&ew->work); 36981fa44ecaSJames Bottomley return 0; 36991fa44ecaSJames Bottomley } 37001fa44ecaSJames Bottomley 370165f27f38SDavid Howells INIT_WORK(&ew->work, fn); 37021fa44ecaSJames Bottomley schedule_work(&ew->work); 37031fa44ecaSJames Bottomley 37041fa44ecaSJames Bottomley return 1; 37051fa44ecaSJames Bottomley } 37061fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context); 37071fa44ecaSJames Bottomley 37087a4e344cSTejun Heo /** 37097a4e344cSTejun Heo * free_workqueue_attrs - free a workqueue_attrs 37107a4e344cSTejun Heo * @attrs: workqueue_attrs to free 37117a4e344cSTejun Heo * 37127a4e344cSTejun Heo * Undo alloc_workqueue_attrs(). 37137a4e344cSTejun Heo */ 3714513c98d0SDaniel Jordan void free_workqueue_attrs(struct workqueue_attrs *attrs) 37157a4e344cSTejun Heo { 37167a4e344cSTejun Heo if (attrs) { 37177a4e344cSTejun Heo free_cpumask_var(attrs->cpumask); 37189546b29eSTejun Heo free_cpumask_var(attrs->__pod_cpumask); 37197a4e344cSTejun Heo kfree(attrs); 37207a4e344cSTejun Heo } 37217a4e344cSTejun Heo } 37227a4e344cSTejun Heo 37237a4e344cSTejun Heo /** 37247a4e344cSTejun Heo * alloc_workqueue_attrs - allocate a workqueue_attrs 37257a4e344cSTejun Heo * 37267a4e344cSTejun Heo * Allocate a new workqueue_attrs, initialize with default settings and 3727d185af30SYacine Belkadi * return it. 
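 *
 * Sketch of typical in-kernel use together with apply_workqueue_attrs()
 * (illustrative only; my_unbound_wq is a made-up WQ_UNBOUND workqueue):
 *
 *	attrs = alloc_workqueue_attrs();
 *	if (attrs) {
 *		attrs->nice = -10;
 *		cpumask_copy(attrs->cpumask, cpumask_of(0));
 *		cpus_read_lock();
 *		apply_workqueue_attrs(my_unbound_wq, attrs);
 *		cpus_read_unlock();
 *		free_workqueue_attrs(attrs);
 *	}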
3728d185af30SYacine Belkadi * 3729d185af30SYacine Belkadi * Return: The allocated new workqueue_attr on success. %NULL on failure. 37307a4e344cSTejun Heo */ 3731513c98d0SDaniel Jordan struct workqueue_attrs *alloc_workqueue_attrs(void) 37327a4e344cSTejun Heo { 37337a4e344cSTejun Heo struct workqueue_attrs *attrs; 37347a4e344cSTejun Heo 3735be69d00dSThomas Gleixner attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 37367a4e344cSTejun Heo if (!attrs) 37377a4e344cSTejun Heo goto fail; 3738be69d00dSThomas Gleixner if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) 37397a4e344cSTejun Heo goto fail; 37409546b29eSTejun Heo if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) 37419546b29eSTejun Heo goto fail; 37427a4e344cSTejun Heo 374313e2e556STejun Heo cpumask_copy(attrs->cpumask, cpu_possible_mask); 3744523a301eSTejun Heo attrs->affn_scope = WQ_AFFN_DFL; 37457a4e344cSTejun Heo return attrs; 37467a4e344cSTejun Heo fail: 37477a4e344cSTejun Heo free_workqueue_attrs(attrs); 37487a4e344cSTejun Heo return NULL; 37497a4e344cSTejun Heo } 37507a4e344cSTejun Heo 375129c91e99STejun Heo static void copy_workqueue_attrs(struct workqueue_attrs *to, 375229c91e99STejun Heo const struct workqueue_attrs *from) 375329c91e99STejun Heo { 375429c91e99STejun Heo to->nice = from->nice; 375529c91e99STejun Heo cpumask_copy(to->cpumask, from->cpumask); 37569546b29eSTejun Heo cpumask_copy(to->__pod_cpumask, from->__pod_cpumask); 37578639ecebSTejun Heo to->affn_strict = from->affn_strict; 375884193c07STejun Heo 37592865a8fbSShaohua Li /* 376084193c07STejun Heo * Unlike hash and equality test, copying shouldn't ignore wq-only 376184193c07STejun Heo * fields as copying is used for both pool and wq attrs. Instead, 376284193c07STejun Heo * get_unbound_pool() explicitly clears the fields. 37632865a8fbSShaohua Li */ 376484193c07STejun Heo to->affn_scope = from->affn_scope; 3765af73f5c9STejun Heo to->ordered = from->ordered; 376629c91e99STejun Heo } 376729c91e99STejun Heo 37685de7a03cSTejun Heo /* 37695de7a03cSTejun Heo * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the 37705de7a03cSTejun Heo * comments in 'struct workqueue_attrs' definition. 
37715de7a03cSTejun Heo */ 37725de7a03cSTejun Heo static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) 37735de7a03cSTejun Heo { 377484193c07STejun Heo attrs->affn_scope = WQ_AFFN_NR_TYPES; 37755de7a03cSTejun Heo attrs->ordered = false; 37765de7a03cSTejun Heo } 37775de7a03cSTejun Heo 377829c91e99STejun Heo /* hash value of the content of @attr */ 377929c91e99STejun Heo static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 378029c91e99STejun Heo { 378129c91e99STejun Heo u32 hash = 0; 378229c91e99STejun Heo 378329c91e99STejun Heo hash = jhash_1word(attrs->nice, hash); 378413e2e556STejun Heo hash = jhash(cpumask_bits(attrs->cpumask), 378513e2e556STejun Heo BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 37869546b29eSTejun Heo hash = jhash(cpumask_bits(attrs->__pod_cpumask), 37879546b29eSTejun Heo BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 37888639ecebSTejun Heo hash = jhash_1word(attrs->affn_strict, hash); 378929c91e99STejun Heo return hash; 379029c91e99STejun Heo } 379129c91e99STejun Heo 379229c91e99STejun Heo /* content equality test */ 379329c91e99STejun Heo static bool wqattrs_equal(const struct workqueue_attrs *a, 379429c91e99STejun Heo const struct workqueue_attrs *b) 379529c91e99STejun Heo { 379629c91e99STejun Heo if (a->nice != b->nice) 379729c91e99STejun Heo return false; 379829c91e99STejun Heo if (!cpumask_equal(a->cpumask, b->cpumask)) 379929c91e99STejun Heo return false; 38009546b29eSTejun Heo if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask)) 38019546b29eSTejun Heo return false; 38028639ecebSTejun Heo if (a->affn_strict != b->affn_strict) 38038639ecebSTejun Heo return false; 380429c91e99STejun Heo return true; 380529c91e99STejun Heo } 380629c91e99STejun Heo 38070f36ee24STejun Heo /* Update @attrs with actually available CPUs */ 38080f36ee24STejun Heo static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, 38090f36ee24STejun Heo const cpumask_t *unbound_cpumask) 38100f36ee24STejun Heo { 38110f36ee24STejun Heo /* 38120f36ee24STejun Heo * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If 38130f36ee24STejun Heo * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to 38140f36ee24STejun Heo * @unbound_cpumask. 
38150f36ee24STejun Heo */ 38160f36ee24STejun Heo cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); 38170f36ee24STejun Heo if (unlikely(cpumask_empty(attrs->cpumask))) 38180f36ee24STejun Heo cpumask_copy(attrs->cpumask, unbound_cpumask); 38190f36ee24STejun Heo } 38200f36ee24STejun Heo 382184193c07STejun Heo /* find wq_pod_type to use for @attrs */ 382284193c07STejun Heo static const struct wq_pod_type * 382384193c07STejun Heo wqattrs_pod_type(const struct workqueue_attrs *attrs) 382484193c07STejun Heo { 3825523a301eSTejun Heo enum wq_affn_scope scope; 3826523a301eSTejun Heo struct wq_pod_type *pt; 3827523a301eSTejun Heo 3828523a301eSTejun Heo /* to synchronize access to wq_affn_dfl */ 3829523a301eSTejun Heo lockdep_assert_held(&wq_pool_mutex); 3830523a301eSTejun Heo 3831523a301eSTejun Heo if (attrs->affn_scope == WQ_AFFN_DFL) 3832523a301eSTejun Heo scope = wq_affn_dfl; 3833523a301eSTejun Heo else 3834523a301eSTejun Heo scope = attrs->affn_scope; 3835523a301eSTejun Heo 3836523a301eSTejun Heo pt = &wq_pod_types[scope]; 383784193c07STejun Heo 383884193c07STejun Heo if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && 383984193c07STejun Heo likely(pt->nr_pods)) 384084193c07STejun Heo return pt; 384184193c07STejun Heo 384284193c07STejun Heo /* 384384193c07STejun Heo * Before workqueue_init_topology(), only SYSTEM is available which is 384484193c07STejun Heo * initialized in workqueue_init_early(). 384584193c07STejun Heo */ 384684193c07STejun Heo pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 384784193c07STejun Heo BUG_ON(!pt->nr_pods); 384884193c07STejun Heo return pt; 384984193c07STejun Heo } 385084193c07STejun Heo 38517a4e344cSTejun Heo /** 38527a4e344cSTejun Heo * init_worker_pool - initialize a newly zalloc'd worker_pool 38537a4e344cSTejun Heo * @pool: worker_pool to initialize 38547a4e344cSTejun Heo * 3855402dd89dSShailendra Verma * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 3856d185af30SYacine Belkadi * 3857d185af30SYacine Belkadi * Return: 0 on success, -errno on failure. Even on failure, all fields 385829c91e99STejun Heo * inside @pool proper are initialized and put_unbound_pool() can be called 385929c91e99STejun Heo * on @pool safely to release it. 
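 * get_unbound_pool() relies on exactly that on its failure path.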
38607a4e344cSTejun Heo */ 38617a4e344cSTejun Heo static int init_worker_pool(struct worker_pool *pool) 38624e1a1f9aSTejun Heo { 3863a9b8a985SSebastian Andrzej Siewior raw_spin_lock_init(&pool->lock); 386429c91e99STejun Heo pool->id = -1; 386529c91e99STejun Heo pool->cpu = -1; 3866f3f90ad4STejun Heo pool->node = NUMA_NO_NODE; 38674e1a1f9aSTejun Heo pool->flags |= POOL_DISASSOCIATED; 386882607adcSTejun Heo pool->watchdog_ts = jiffies; 38694e1a1f9aSTejun Heo INIT_LIST_HEAD(&pool->worklist); 38704e1a1f9aSTejun Heo INIT_LIST_HEAD(&pool->idle_list); 38714e1a1f9aSTejun Heo hash_init(pool->busy_hash); 38724e1a1f9aSTejun Heo 387332a6c723SKees Cook timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 38743f959aa3SValentin Schneider INIT_WORK(&pool->idle_cull_work, idle_cull_fn); 38754e1a1f9aSTejun Heo 387632a6c723SKees Cook timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 38774e1a1f9aSTejun Heo 3878da028469SLai Jiangshan INIT_LIST_HEAD(&pool->workers); 3879e02b9312SValentin Schneider INIT_LIST_HEAD(&pool->dying_workers); 38807a4e344cSTejun Heo 38817cda9aaeSLai Jiangshan ida_init(&pool->worker_ida); 388229c91e99STejun Heo INIT_HLIST_NODE(&pool->hash_node); 388329c91e99STejun Heo pool->refcnt = 1; 388429c91e99STejun Heo 388529c91e99STejun Heo /* shouldn't fail above this point */ 3886be69d00dSThomas Gleixner pool->attrs = alloc_workqueue_attrs(); 38877a4e344cSTejun Heo if (!pool->attrs) 38887a4e344cSTejun Heo return -ENOMEM; 38895de7a03cSTejun Heo 38905de7a03cSTejun Heo wqattrs_clear_for_pool(pool->attrs); 38915de7a03cSTejun Heo 38927a4e344cSTejun Heo return 0; 38934e1a1f9aSTejun Heo } 38944e1a1f9aSTejun Heo 3895669de8bdSBart Van Assche #ifdef CONFIG_LOCKDEP 3896669de8bdSBart Van Assche static void wq_init_lockdep(struct workqueue_struct *wq) 3897669de8bdSBart Van Assche { 3898669de8bdSBart Van Assche char *lock_name; 3899669de8bdSBart Van Assche 3900669de8bdSBart Van Assche lockdep_register_key(&wq->key); 3901669de8bdSBart Van Assche lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); 3902669de8bdSBart Van Assche if (!lock_name) 3903669de8bdSBart Van Assche lock_name = wq->name; 390469a106c0SQian Cai 390569a106c0SQian Cai wq->lock_name = lock_name; 3906669de8bdSBart Van Assche lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0); 3907669de8bdSBart Van Assche } 3908669de8bdSBart Van Assche 3909669de8bdSBart Van Assche static void wq_unregister_lockdep(struct workqueue_struct *wq) 3910669de8bdSBart Van Assche { 3911669de8bdSBart Van Assche lockdep_unregister_key(&wq->key); 3912669de8bdSBart Van Assche } 3913669de8bdSBart Van Assche 3914669de8bdSBart Van Assche static void wq_free_lockdep(struct workqueue_struct *wq) 3915669de8bdSBart Van Assche { 3916669de8bdSBart Van Assche if (wq->lock_name != wq->name) 3917669de8bdSBart Van Assche kfree(wq->lock_name); 3918669de8bdSBart Van Assche } 3919669de8bdSBart Van Assche #else 3920669de8bdSBart Van Assche static void wq_init_lockdep(struct workqueue_struct *wq) 3921669de8bdSBart Van Assche { 3922669de8bdSBart Van Assche } 3923669de8bdSBart Van Assche 3924669de8bdSBart Van Assche static void wq_unregister_lockdep(struct workqueue_struct *wq) 3925669de8bdSBart Van Assche { 3926669de8bdSBart Van Assche } 3927669de8bdSBart Van Assche 3928669de8bdSBart Van Assche static void wq_free_lockdep(struct workqueue_struct *wq) 3929669de8bdSBart Van Assche { 3930669de8bdSBart Van Assche } 3931669de8bdSBart Van Assche #endif 3932669de8bdSBart Van Assche 3933e2dca7adSTejun Heo static void rcu_free_wq(struct rcu_head *rcu) 
3934e2dca7adSTejun Heo { 3935e2dca7adSTejun Heo struct workqueue_struct *wq = 3936e2dca7adSTejun Heo container_of(rcu, struct workqueue_struct, rcu); 3937e2dca7adSTejun Heo 3938669de8bdSBart Van Assche wq_free_lockdep(wq); 3939ee1ceef7STejun Heo free_percpu(wq->cpu_pwq); 3940e2dca7adSTejun Heo free_workqueue_attrs(wq->unbound_attrs); 3941e2dca7adSTejun Heo kfree(wq); 3942e2dca7adSTejun Heo } 3943e2dca7adSTejun Heo 394429c91e99STejun Heo static void rcu_free_pool(struct rcu_head *rcu) 394529c91e99STejun Heo { 394629c91e99STejun Heo struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 394729c91e99STejun Heo 39487cda9aaeSLai Jiangshan ida_destroy(&pool->worker_ida); 394929c91e99STejun Heo free_workqueue_attrs(pool->attrs); 395029c91e99STejun Heo kfree(pool); 395129c91e99STejun Heo } 395229c91e99STejun Heo 395329c91e99STejun Heo /** 395429c91e99STejun Heo * put_unbound_pool - put a worker_pool 395529c91e99STejun Heo * @pool: worker_pool to put 395629c91e99STejun Heo * 395724acfb71SThomas Gleixner * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU 3958c5aa87bbSTejun Heo * safe manner. get_unbound_pool() calls this function on its failure path 3959c5aa87bbSTejun Heo * and this function should be able to release pools which went through, 3960c5aa87bbSTejun Heo * successfully or not, init_worker_pool(). 3961a892caccSTejun Heo * 3962a892caccSTejun Heo * Should be called with wq_pool_mutex held. 396329c91e99STejun Heo */ 396429c91e99STejun Heo static void put_unbound_pool(struct worker_pool *pool) 396529c91e99STejun Heo { 396660f5a4bcSLai Jiangshan DECLARE_COMPLETION_ONSTACK(detach_completion); 396729c91e99STejun Heo struct worker *worker; 39689680540cSYang Yingliang LIST_HEAD(cull_list); 3969e02b9312SValentin Schneider 3970a892caccSTejun Heo lockdep_assert_held(&wq_pool_mutex); 3971a892caccSTejun Heo 3972a892caccSTejun Heo if (--pool->refcnt) 397329c91e99STejun Heo return; 397429c91e99STejun Heo 397529c91e99STejun Heo /* sanity checks */ 397661d0fbb4SLai Jiangshan if (WARN_ON(!(pool->cpu < 0)) || 3977a892caccSTejun Heo WARN_ON(!list_empty(&pool->worklist))) 397829c91e99STejun Heo return; 397929c91e99STejun Heo 398029c91e99STejun Heo /* release id and unhash */ 398129c91e99STejun Heo if (pool->id >= 0) 398229c91e99STejun Heo idr_remove(&worker_pool_idr, pool->id); 398329c91e99STejun Heo hash_del(&pool->hash_node); 398429c91e99STejun Heo 3985c5aa87bbSTejun Heo /* 3986692b4825STejun Heo * Become the manager and destroy all workers. This prevents 3987692b4825STejun Heo * @pool's workers from blocking on attach_mutex. We're the last 3988692b4825STejun Heo * manager and @pool gets freed with the flag set. 39899ab03be4SValentin Schneider * 39909ab03be4SValentin Schneider * Having a concurrent manager is quite unlikely to happen as we can 39919ab03be4SValentin Schneider * only get here with 39929ab03be4SValentin Schneider * pwq->refcnt == pool->refcnt == 0 39939ab03be4SValentin Schneider * which implies no work queued to the pool, which implies no worker can 39949ab03be4SValentin Schneider * become the manager. 
However a worker could have taken the role of 39959ab03be4SValentin Schneider * manager before the refcnts dropped to 0, since maybe_create_worker() 39969ab03be4SValentin Schneider * drops pool->lock 3997c5aa87bbSTejun Heo */ 39989ab03be4SValentin Schneider while (true) { 39999ab03be4SValentin Schneider rcuwait_wait_event(&manager_wait, 40009ab03be4SValentin Schneider !(pool->flags & POOL_MANAGER_ACTIVE), 4001d8bb65abSSebastian Andrzej Siewior TASK_UNINTERRUPTIBLE); 4002e02b9312SValentin Schneider 4003e02b9312SValentin Schneider mutex_lock(&wq_pool_attach_mutex); 40049ab03be4SValentin Schneider raw_spin_lock_irq(&pool->lock); 40059ab03be4SValentin Schneider if (!(pool->flags & POOL_MANAGER_ACTIVE)) { 4006692b4825STejun Heo pool->flags |= POOL_MANAGER_ACTIVE; 40079ab03be4SValentin Schneider break; 40089ab03be4SValentin Schneider } 40099ab03be4SValentin Schneider raw_spin_unlock_irq(&pool->lock); 4010e02b9312SValentin Schneider mutex_unlock(&wq_pool_attach_mutex); 40119ab03be4SValentin Schneider } 4012692b4825STejun Heo 40131037de36SLai Jiangshan while ((worker = first_idle_worker(pool))) 4014e02b9312SValentin Schneider set_worker_dying(worker, &cull_list); 401529c91e99STejun Heo WARN_ON(pool->nr_workers || pool->nr_idle); 4016a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 401760f5a4bcSLai Jiangshan 4018e02b9312SValentin Schneider wake_dying_workers(&cull_list); 4019e02b9312SValentin Schneider 4020e02b9312SValentin Schneider if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers)) 402160f5a4bcSLai Jiangshan pool->detach_completion = &detach_completion; 40221258fae7STejun Heo mutex_unlock(&wq_pool_attach_mutex); 402360f5a4bcSLai Jiangshan 402460f5a4bcSLai Jiangshan if (pool->detach_completion) 402560f5a4bcSLai Jiangshan wait_for_completion(pool->detach_completion); 402660f5a4bcSLai Jiangshan 402729c91e99STejun Heo /* shut down the timers */ 402829c91e99STejun Heo del_timer_sync(&pool->idle_timer); 40293f959aa3SValentin Schneider cancel_work_sync(&pool->idle_cull_work); 403029c91e99STejun Heo del_timer_sync(&pool->mayday_timer); 403129c91e99STejun Heo 403224acfb71SThomas Gleixner /* RCU protected to allow dereferences from get_work_pool() */ 403325b00775SPaul E. McKenney call_rcu(&pool->rcu, rcu_free_pool); 403429c91e99STejun Heo } 403529c91e99STejun Heo 403629c91e99STejun Heo /** 403729c91e99STejun Heo * get_unbound_pool - get a worker_pool with the specified attributes 403829c91e99STejun Heo * @attrs: the attributes of the worker_pool to get 403929c91e99STejun Heo * 404029c91e99STejun Heo * Obtain a worker_pool which has the same attributes as @attrs, bump the 404129c91e99STejun Heo * reference count and return it. If there already is a matching 404229c91e99STejun Heo * worker_pool, it will be used; otherwise, this function attempts to 4043d185af30SYacine Belkadi * create a new one. 4044a892caccSTejun Heo * 4045a892caccSTejun Heo * Should be called with wq_pool_mutex held. 4046d185af30SYacine Belkadi * 4047d185af30SYacine Belkadi * Return: On success, a worker_pool with the same attributes as @attrs. 4048d185af30SYacine Belkadi * On failure, %NULL. 
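 * The reference obtained here is dropped with put_unbound_pool(),
 * likewise with wq_pool_mutex held.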
404929c91e99STejun Heo */ 405029c91e99STejun Heo static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 405129c91e99STejun Heo { 405284193c07STejun Heo struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA]; 405329c91e99STejun Heo u32 hash = wqattrs_hash(attrs); 405429c91e99STejun Heo struct worker_pool *pool; 405584193c07STejun Heo int pod, node = NUMA_NO_NODE; 405629c91e99STejun Heo 4057a892caccSTejun Heo lockdep_assert_held(&wq_pool_mutex); 405829c91e99STejun Heo 405929c91e99STejun Heo /* do we already have a matching pool? */ 406029c91e99STejun Heo hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 406129c91e99STejun Heo if (wqattrs_equal(pool->attrs, attrs)) { 406229c91e99STejun Heo pool->refcnt++; 40633fb1823cSLai Jiangshan return pool; 406429c91e99STejun Heo } 406529c91e99STejun Heo } 406629c91e99STejun Heo 40679546b29eSTejun Heo /* If __pod_cpumask is contained inside a NUMA pod, that's our node */ 406884193c07STejun Heo for (pod = 0; pod < pt->nr_pods; pod++) { 40699546b29eSTejun Heo if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { 407084193c07STejun Heo node = pt->pod_node[pod]; 4071e2273584SXunlei Pang break; 4072e2273584SXunlei Pang } 4073e2273584SXunlei Pang } 4074e2273584SXunlei Pang 407529c91e99STejun Heo /* nope, create a new one */ 407684193c07STejun Heo pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); 407729c91e99STejun Heo if (!pool || init_worker_pool(pool) < 0) 407829c91e99STejun Heo goto fail; 407929c91e99STejun Heo 408084193c07STejun Heo pool->node = node; 40815de7a03cSTejun Heo copy_workqueue_attrs(pool->attrs, attrs); 40825de7a03cSTejun Heo wqattrs_clear_for_pool(pool->attrs); 40832865a8fbSShaohua Li 408429c91e99STejun Heo if (worker_pool_assign_id(pool) < 0) 408529c91e99STejun Heo goto fail; 408629c91e99STejun Heo 408729c91e99STejun Heo /* create and start the initial worker */ 40883347fa09STejun Heo if (wq_online && !create_worker(pool)) 408929c91e99STejun Heo goto fail; 409029c91e99STejun Heo 409129c91e99STejun Heo /* install */ 409229c91e99STejun Heo hash_add(unbound_pool_hash, &pool->hash_node, hash); 40933fb1823cSLai Jiangshan 409429c91e99STejun Heo return pool; 409529c91e99STejun Heo fail: 409629c91e99STejun Heo if (pool) 409729c91e99STejun Heo put_unbound_pool(pool); 409829c91e99STejun Heo return NULL; 409929c91e99STejun Heo } 410029c91e99STejun Heo 41018864b4e5STejun Heo static void rcu_free_pwq(struct rcu_head *rcu) 41028864b4e5STejun Heo { 41038864b4e5STejun Heo kmem_cache_free(pwq_cache, 41048864b4e5STejun Heo container_of(rcu, struct pool_workqueue, rcu)); 41058864b4e5STejun Heo } 41068864b4e5STejun Heo 41078864b4e5STejun Heo /* 4108967b494eSTejun Heo * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero 4109967b494eSTejun Heo * refcnt and needs to be destroyed. 41108864b4e5STejun Heo */ 4111687a9aa5STejun Heo static void pwq_release_workfn(struct kthread_work *work) 41128864b4e5STejun Heo { 41138864b4e5STejun Heo struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 4114687a9aa5STejun Heo release_work); 41158864b4e5STejun Heo struct workqueue_struct *wq = pwq->wq; 41168864b4e5STejun Heo struct worker_pool *pool = pwq->pool; 4117b42b0bddSYang Yingliang bool is_last = false; 41188864b4e5STejun Heo 4119b42b0bddSYang Yingliang /* 4120687a9aa5STejun Heo * When @pwq is not linked, it doesn't hold any reference to the 4121b42b0bddSYang Yingliang * @wq, and @wq is invalid to access. 
4122b42b0bddSYang Yingliang */ 4123b42b0bddSYang Yingliang if (!list_empty(&pwq->pwqs_node)) { 41243c25a55dSLai Jiangshan mutex_lock(&wq->mutex); 41258864b4e5STejun Heo list_del_rcu(&pwq->pwqs_node); 4126bc0caf09STejun Heo is_last = list_empty(&wq->pwqs); 41273c25a55dSLai Jiangshan mutex_unlock(&wq->mutex); 4128b42b0bddSYang Yingliang } 41298864b4e5STejun Heo 4130687a9aa5STejun Heo if (wq->flags & WQ_UNBOUND) { 4131a892caccSTejun Heo mutex_lock(&wq_pool_mutex); 41328864b4e5STejun Heo put_unbound_pool(pool); 4133a892caccSTejun Heo mutex_unlock(&wq_pool_mutex); 4134687a9aa5STejun Heo } 4135a892caccSTejun Heo 413625b00775SPaul E. McKenney call_rcu(&pwq->rcu, rcu_free_pwq); 41378864b4e5STejun Heo 41388864b4e5STejun Heo /* 41398864b4e5STejun Heo * If we're the last pwq going away, @wq is already dead and no one 4140e2dca7adSTejun Heo * is gonna access it anymore. Schedule RCU free. 41418864b4e5STejun Heo */ 4142669de8bdSBart Van Assche if (is_last) { 4143669de8bdSBart Van Assche wq_unregister_lockdep(wq); 414425b00775SPaul E. McKenney call_rcu(&wq->rcu, rcu_free_wq); 41456029a918STejun Heo } 4146669de8bdSBart Van Assche } 41478864b4e5STejun Heo 414867dc8325SCai Huoqing /* initialize newly allocated @pwq which is associated with @wq and @pool */ 4149f147f29eSTejun Heo static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 4150f147f29eSTejun Heo struct worker_pool *pool) 4151d2c1d404STejun Heo { 4152d2c1d404STejun Heo BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 4153d2c1d404STejun Heo 4154e50aba9aSTejun Heo memset(pwq, 0, sizeof(*pwq)); 4155e50aba9aSTejun Heo 4156d2c1d404STejun Heo pwq->pool = pool; 4157d2c1d404STejun Heo pwq->wq = wq; 4158d2c1d404STejun Heo pwq->flush_color = -1; 41598864b4e5STejun Heo pwq->refcnt = 1; 4160f97a4a1aSLai Jiangshan INIT_LIST_HEAD(&pwq->inactive_works); 41611befcf30STejun Heo INIT_LIST_HEAD(&pwq->pwqs_node); 4162d2c1d404STejun Heo INIT_LIST_HEAD(&pwq->mayday_node); 4163687a9aa5STejun Heo kthread_init_work(&pwq->release_work, pwq_release_workfn); 4164f147f29eSTejun Heo } 4165d2c1d404STejun Heo 4166f147f29eSTejun Heo /* sync @pwq with the current state of its associated wq and link it */ 41671befcf30STejun Heo static void link_pwq(struct pool_workqueue *pwq) 4168f147f29eSTejun Heo { 4169f147f29eSTejun Heo struct workqueue_struct *wq = pwq->wq; 4170f147f29eSTejun Heo 4171f147f29eSTejun Heo lockdep_assert_held(&wq->mutex); 417275ccf595STejun Heo 41731befcf30STejun Heo /* may be called multiple times, ignore if already linked */ 41741befcf30STejun Heo if (!list_empty(&pwq->pwqs_node)) 41751befcf30STejun Heo return; 41761befcf30STejun Heo 417729b1cb41SLai Jiangshan /* set the matching work_color */ 417875ccf595STejun Heo pwq->work_color = wq->work_color; 4179983ca25eSTejun Heo 4180983ca25eSTejun Heo /* link in @pwq */ 41819e8cd2f5STejun Heo list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 4182df2d5ae4STejun Heo } 41836029a918STejun Heo 4184f147f29eSTejun Heo /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 4185f147f29eSTejun Heo static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 4186f147f29eSTejun Heo const struct workqueue_attrs *attrs) 4187f147f29eSTejun Heo { 4188f147f29eSTejun Heo struct worker_pool *pool; 4189f147f29eSTejun Heo struct pool_workqueue *pwq; 4190f147f29eSTejun Heo 4191f147f29eSTejun Heo lockdep_assert_held(&wq_pool_mutex); 4192f147f29eSTejun Heo 4193f147f29eSTejun Heo pool = get_unbound_pool(attrs); 4194f147f29eSTejun Heo if (!pool) 4195f147f29eSTejun Heo return NULL; 
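	/* allocate the pwq on the pool's node to keep accesses node-local */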
4196f147f29eSTejun Heo 4197e50aba9aSTejun Heo pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 4198f147f29eSTejun Heo if (!pwq) { 4199f147f29eSTejun Heo put_unbound_pool(pool); 4200f147f29eSTejun Heo return NULL; 4201f147f29eSTejun Heo } 4202f147f29eSTejun Heo 4203f147f29eSTejun Heo init_pwq(pwq, wq, pool); 4204f147f29eSTejun Heo return pwq; 4205d2c1d404STejun Heo } 4206d2c1d404STejun Heo 42074c16bd32STejun Heo /** 4208fef59c9cSTejun Heo * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod 4209042f7df1SLai Jiangshan * @attrs: the wq_attrs of the default pwq of the target workqueue 421084193c07STejun Heo * @cpu: the target CPU 42114c16bd32STejun Heo * @cpu_going_down: if >= 0, the CPU to consider as offline 42124c16bd32STejun Heo * 4213fef59c9cSTejun Heo * Calculate the cpumask a workqueue with @attrs should use on @pod. If 4214fef59c9cSTejun Heo * @cpu_going_down is >= 0, that cpu is considered offline during calculation. 42159546b29eSTejun Heo * The result is stored in @attrs->__pod_cpumask. 42164c16bd32STejun Heo * 4217fef59c9cSTejun Heo * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled 4218fef59c9cSTejun Heo * and @pod has online CPUs requested by @attrs, the returned cpumask is the 4219fef59c9cSTejun Heo * intersection of the possible CPUs of @pod and @attrs->cpumask. 42204c16bd32STejun Heo * 4221fef59c9cSTejun Heo * The caller is responsible for ensuring that the cpumask of @pod stays stable. 42224c16bd32STejun Heo */ 42239546b29eSTejun Heo static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, 42249546b29eSTejun Heo int cpu_going_down) 42254c16bd32STejun Heo { 422684193c07STejun Heo const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 422784193c07STejun Heo int pod = pt->cpu_pod[cpu]; 42284c16bd32STejun Heo 4229fef59c9cSTejun Heo /* does @pod have any online CPUs @attrs wants? 
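 * If not, fall back to @attrs->cpumask below so that the resulting
 * mask is never left empty.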
*/ 42309546b29eSTejun Heo cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); 42319546b29eSTejun Heo cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask); 42324c16bd32STejun Heo if (cpu_going_down >= 0) 42339546b29eSTejun Heo cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask); 42344c16bd32STejun Heo 42359546b29eSTejun Heo if (cpumask_empty(attrs->__pod_cpumask)) { 42369546b29eSTejun Heo cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); 423784193c07STejun Heo return; 423884193c07STejun Heo } 42394c16bd32STejun Heo 4240fef59c9cSTejun Heo /* yeap, return possible CPUs in @pod that @attrs wants */ 42419546b29eSTejun Heo cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]); 42421ad0f0a7SMichael Bringmann 42439546b29eSTejun Heo if (cpumask_empty(attrs->__pod_cpumask)) 42441ad0f0a7SMichael Bringmann pr_warn_once("WARNING: workqueue cpumask: online intersect > " 42451ad0f0a7SMichael Bringmann "possible intersect\n"); 42464c16bd32STejun Heo } 42474c16bd32STejun Heo 4248f3c11cb2SGreg Kroah-Hartman /* install @pwq into @wq's cpu_pwq and return the old pwq */ 4249636b927eSTejun Heo static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, 4250636b927eSTejun Heo int cpu, struct pool_workqueue *pwq) 42511befcf30STejun Heo { 42521befcf30STejun Heo struct pool_workqueue *old_pwq; 42531befcf30STejun Heo 42545b95e1afSLai Jiangshan lockdep_assert_held(&wq_pool_mutex); 42551befcf30STejun Heo lockdep_assert_held(&wq->mutex); 42561befcf30STejun Heo 42571befcf30STejun Heo /* link_pwq() can handle duplicate calls */ 42581befcf30STejun Heo link_pwq(pwq); 42591befcf30STejun Heo 4260f3c11cb2SGreg Kroah-Hartman old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4261f3c11cb2SGreg Kroah-Hartman rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq); 42621befcf30STejun Heo return old_pwq; 42631befcf30STejun Heo } 42641befcf30STejun Heo 42652d5f0764SLai Jiangshan /* context to store the prepared attrs & pwqs before applying */ 42662d5f0764SLai Jiangshan struct apply_wqattrs_ctx { 42672d5f0764SLai Jiangshan struct workqueue_struct *wq; /* target workqueue */ 42682d5f0764SLai Jiangshan struct workqueue_attrs *attrs; /* attrs to apply */ 4269042f7df1SLai Jiangshan struct list_head list; /* queued for batching commit */ 42702d5f0764SLai Jiangshan struct pool_workqueue *dfl_pwq; 42712d5f0764SLai Jiangshan struct pool_workqueue *pwq_tbl[]; 42722d5f0764SLai Jiangshan }; 42732d5f0764SLai Jiangshan 42742d5f0764SLai Jiangshan /* free the resources after success or abort */ 42752d5f0764SLai Jiangshan static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 42762d5f0764SLai Jiangshan { 42772d5f0764SLai Jiangshan if (ctx) { 4278636b927eSTejun Heo int cpu; 42792d5f0764SLai Jiangshan 4280636b927eSTejun Heo for_each_possible_cpu(cpu) 4281636b927eSTejun Heo put_pwq_unlocked(ctx->pwq_tbl[cpu]); 42822d5f0764SLai Jiangshan put_pwq_unlocked(ctx->dfl_pwq); 42832d5f0764SLai Jiangshan 42842d5f0764SLai Jiangshan free_workqueue_attrs(ctx->attrs); 42852d5f0764SLai Jiangshan 42862d5f0764SLai Jiangshan kfree(ctx); 42872d5f0764SLai Jiangshan } 42882d5f0764SLai Jiangshan } 42892d5f0764SLai Jiangshan 42902d5f0764SLai Jiangshan /* allocate the attrs and pwqs for later installation */ 42912d5f0764SLai Jiangshan static struct apply_wqattrs_ctx * 42922d5f0764SLai Jiangshan apply_wqattrs_prepare(struct workqueue_struct *wq, 429399c621efSLai Jiangshan const struct workqueue_attrs *attrs, 429499c621efSLai Jiangshan const cpumask_var_t unbound_cpumask) 
42952d5f0764SLai Jiangshan { 42962d5f0764SLai Jiangshan struct apply_wqattrs_ctx *ctx; 42979546b29eSTejun Heo struct workqueue_attrs *new_attrs; 4298636b927eSTejun Heo int cpu; 42992d5f0764SLai Jiangshan 43002d5f0764SLai Jiangshan lockdep_assert_held(&wq_pool_mutex); 43012d5f0764SLai Jiangshan 430284193c07STejun Heo if (WARN_ON(attrs->affn_scope < 0 || 430384193c07STejun Heo attrs->affn_scope >= WQ_AFFN_NR_TYPES)) 430484193c07STejun Heo return ERR_PTR(-EINVAL); 430584193c07STejun Heo 4306636b927eSTejun Heo ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL); 43072d5f0764SLai Jiangshan 4308be69d00dSThomas Gleixner new_attrs = alloc_workqueue_attrs(); 43099546b29eSTejun Heo if (!ctx || !new_attrs) 43102d5f0764SLai Jiangshan goto out_free; 43112d5f0764SLai Jiangshan 4312042f7df1SLai Jiangshan /* 43132d5f0764SLai Jiangshan * If something goes wrong during CPU up/down, we'll fall back to 43142d5f0764SLai Jiangshan * the default pwq covering whole @attrs->cpumask. Always create 43152d5f0764SLai Jiangshan * it even if we don't use it immediately. 43162d5f0764SLai Jiangshan */ 43170f36ee24STejun Heo copy_workqueue_attrs(new_attrs, attrs); 43180f36ee24STejun Heo wqattrs_actualize_cpumask(new_attrs, unbound_cpumask); 43199546b29eSTejun Heo cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 43202d5f0764SLai Jiangshan ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 43212d5f0764SLai Jiangshan if (!ctx->dfl_pwq) 43222d5f0764SLai Jiangshan goto out_free; 43232d5f0764SLai Jiangshan 4324636b927eSTejun Heo for_each_possible_cpu(cpu) { 4325af73f5c9STejun Heo if (new_attrs->ordered) { 43262d5f0764SLai Jiangshan ctx->dfl_pwq->refcnt++; 4327636b927eSTejun Heo ctx->pwq_tbl[cpu] = ctx->dfl_pwq; 4328636b927eSTejun Heo } else { 43299546b29eSTejun Heo wq_calc_pod_cpumask(new_attrs, cpu, -1); 43309546b29eSTejun Heo ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs); 4331636b927eSTejun Heo if (!ctx->pwq_tbl[cpu]) 4332636b927eSTejun Heo goto out_free; 43332d5f0764SLai Jiangshan } 43342d5f0764SLai Jiangshan } 43352d5f0764SLai Jiangshan 4336042f7df1SLai Jiangshan /* save the user configured attrs and sanitize it. 
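 * The saved copy keeps the user-configured cpumask, merely clamped to
 * cpu_possible_mask; the effective masks computed above live in the
 * individual pwqs.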
*/ 4337042f7df1SLai Jiangshan copy_workqueue_attrs(new_attrs, attrs); 4338042f7df1SLai Jiangshan cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); 43399546b29eSTejun Heo cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 43402d5f0764SLai Jiangshan ctx->attrs = new_attrs; 4341042f7df1SLai Jiangshan 43422d5f0764SLai Jiangshan ctx->wq = wq; 43432d5f0764SLai Jiangshan return ctx; 43442d5f0764SLai Jiangshan 43452d5f0764SLai Jiangshan out_free: 43462d5f0764SLai Jiangshan free_workqueue_attrs(new_attrs); 43472d5f0764SLai Jiangshan apply_wqattrs_cleanup(ctx); 434884193c07STejun Heo return ERR_PTR(-ENOMEM); 43492d5f0764SLai Jiangshan } 43502d5f0764SLai Jiangshan 43512d5f0764SLai Jiangshan /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */ 43522d5f0764SLai Jiangshan static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) 43532d5f0764SLai Jiangshan { 4354636b927eSTejun Heo int cpu; 43552d5f0764SLai Jiangshan 43562d5f0764SLai Jiangshan /* all pwqs have been created successfully, let's install'em */ 43572d5f0764SLai Jiangshan mutex_lock(&ctx->wq->mutex); 43582d5f0764SLai Jiangshan 43592d5f0764SLai Jiangshan copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); 43602d5f0764SLai Jiangshan 4361f3c11cb2SGreg Kroah-Hartman /* save the previous pwq and install the new one */ 4362636b927eSTejun Heo for_each_possible_cpu(cpu) 4363636b927eSTejun Heo ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu, 4364636b927eSTejun Heo ctx->pwq_tbl[cpu]); 4365f3c11cb2SGreg Kroah-Hartman 4366f3c11cb2SGreg Kroah-Hartman /* @dfl_pwq might not have been used, ensure it's linked */ 4367f3c11cb2SGreg Kroah-Hartman link_pwq(ctx->dfl_pwq); 4368f3c11cb2SGreg Kroah-Hartman swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); 43692d5f0764SLai Jiangshan 43702d5f0764SLai Jiangshan mutex_unlock(&ctx->wq->mutex); 43712d5f0764SLai Jiangshan } 43722d5f0764SLai Jiangshan 4373a0111cf6SLai Jiangshan static void apply_wqattrs_lock(void) 4374a0111cf6SLai Jiangshan { 4375a0111cf6SLai Jiangshan /* CPUs should stay stable across pwq creations and installations */ 4376ffd8bea8SSebastian Andrzej Siewior cpus_read_lock(); 4377a0111cf6SLai Jiangshan mutex_lock(&wq_pool_mutex); 4378a0111cf6SLai Jiangshan } 4379a0111cf6SLai Jiangshan 4380a0111cf6SLai Jiangshan static void apply_wqattrs_unlock(void) 4381a0111cf6SLai Jiangshan { 4382a0111cf6SLai Jiangshan mutex_unlock(&wq_pool_mutex); 4383ffd8bea8SSebastian Andrzej Siewior cpus_read_unlock(); 4384a0111cf6SLai Jiangshan } 4385a0111cf6SLai Jiangshan 4386a0111cf6SLai Jiangshan static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, 4387a0111cf6SLai Jiangshan const struct workqueue_attrs *attrs) 4388a0111cf6SLai Jiangshan { 4389a0111cf6SLai Jiangshan struct apply_wqattrs_ctx *ctx; 4390a0111cf6SLai Jiangshan 4391a0111cf6SLai Jiangshan /* only unbound workqueues can change attributes */ 4392a0111cf6SLai Jiangshan if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 4393a0111cf6SLai Jiangshan return -EINVAL; 4394a0111cf6SLai Jiangshan 4395a0111cf6SLai Jiangshan /* creating multiple pwqs breaks ordering guarantee */ 43960a94efb5STejun Heo if (!list_empty(&wq->pwqs)) { 43970a94efb5STejun Heo if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 4398a0111cf6SLai Jiangshan return -EINVAL; 4399a0111cf6SLai Jiangshan 44000a94efb5STejun Heo wq->flags &= ~__WQ_ORDERED; 44010a94efb5STejun Heo } 44020a94efb5STejun Heo 440399c621efSLai Jiangshan ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); 440484193c07STejun Heo if (IS_ERR(ctx)) 440584193c07STejun Heo return 
PTR_ERR(ctx); 4406a0111cf6SLai Jiangshan 4407a0111cf6SLai Jiangshan /* the ctx has been prepared successfully, let's commit it */ 4408a0111cf6SLai Jiangshan apply_wqattrs_commit(ctx); 4409a0111cf6SLai Jiangshan apply_wqattrs_cleanup(ctx); 4410a0111cf6SLai Jiangshan 44116201171eSwanghaibin return 0; 4412a0111cf6SLai Jiangshan } 4413a0111cf6SLai Jiangshan 44149e8cd2f5STejun Heo /** 44159e8cd2f5STejun Heo * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 44169e8cd2f5STejun Heo * @wq: the target workqueue 44179e8cd2f5STejun Heo * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 44189e8cd2f5STejun Heo * 4419fef59c9cSTejun Heo * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps 4420fef59c9cSTejun Heo * a separate pwq to each CPU pod with possibles CPUs in @attrs->cpumask so that 4421fef59c9cSTejun Heo * work items are affine to the pod it was issued on. Older pwqs are released as 4422fef59c9cSTejun Heo * in-flight work items finish. Note that a work item which repeatedly requeues 4423fef59c9cSTejun Heo * itself back-to-back will stay on its current pwq. 44249e8cd2f5STejun Heo * 4425d185af30SYacine Belkadi * Performs GFP_KERNEL allocations. 4426d185af30SYacine Belkadi * 4427ffd8bea8SSebastian Andrzej Siewior * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock(). 4428509b3204SDaniel Jordan * 4429d185af30SYacine Belkadi * Return: 0 on success and -errno on failure. 44309e8cd2f5STejun Heo */ 4431513c98d0SDaniel Jordan int apply_workqueue_attrs(struct workqueue_struct *wq, 44329e8cd2f5STejun Heo const struct workqueue_attrs *attrs) 44339e8cd2f5STejun Heo { 4434a0111cf6SLai Jiangshan int ret; 44359e8cd2f5STejun Heo 4436509b3204SDaniel Jordan lockdep_assert_cpus_held(); 4437509b3204SDaniel Jordan 4438509b3204SDaniel Jordan mutex_lock(&wq_pool_mutex); 4439a0111cf6SLai Jiangshan ret = apply_workqueue_attrs_locked(wq, attrs); 4440509b3204SDaniel Jordan mutex_unlock(&wq_pool_mutex); 44412d5f0764SLai Jiangshan 44422d5f0764SLai Jiangshan return ret; 44439e8cd2f5STejun Heo } 44449e8cd2f5STejun Heo 44454c16bd32STejun Heo /** 4446fef59c9cSTejun Heo * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug 44474c16bd32STejun Heo * @wq: the target workqueue 44484cbfd3deSTejun Heo * @cpu: the CPU to update pool association for 44494cbfd3deSTejun Heo * @hotplug_cpu: the CPU coming up or going down 44504c16bd32STejun Heo * @online: whether @cpu is coming up or going down 44514c16bd32STejun Heo * 44524c16bd32STejun Heo * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 4453fef59c9cSTejun Heo * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of 44544c16bd32STejun Heo * @wq accordingly. 44554c16bd32STejun Heo * 44564c16bd32STejun Heo * 4457fef59c9cSTejun Heo * If pod affinity can't be adjusted due to memory allocation failure, it falls 4458fef59c9cSTejun Heo * back to @wq->dfl_pwq which may not be optimal but is always correct. 4459fef59c9cSTejun Heo * 4460fef59c9cSTejun Heo * Note that when the last allowed CPU of a pod goes offline for a workqueue 4461fef59c9cSTejun Heo * with a cpumask spanning multiple pods, the workers which were already 4462fef59c9cSTejun Heo * executing the work items for the workqueue will lose their CPU affinity and 4463fef59c9cSTejun Heo * may execute on any CPU. This is similar to how per-cpu workqueues behave on 4464fef59c9cSTejun Heo * CPU_DOWN. 
If a workqueue user wants strict affinity, it's the user's 4465fef59c9cSTejun Heo * responsibility to flush the work item from CPU_DOWN_PREPARE. 44664c16bd32STejun Heo */ 4467fef59c9cSTejun Heo static void wq_update_pod(struct workqueue_struct *wq, int cpu, 44684cbfd3deSTejun Heo int hotplug_cpu, bool online) 44694c16bd32STejun Heo { 44704cbfd3deSTejun Heo int off_cpu = online ? -1 : hotplug_cpu; 44714c16bd32STejun Heo struct pool_workqueue *old_pwq = NULL, *pwq; 44724c16bd32STejun Heo struct workqueue_attrs *target_attrs; 44734c16bd32STejun Heo 44744c16bd32STejun Heo lockdep_assert_held(&wq_pool_mutex); 44754c16bd32STejun Heo 447684193c07STejun Heo if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered) 44774c16bd32STejun Heo return; 44784c16bd32STejun Heo 44794c16bd32STejun Heo /* 44804c16bd32STejun Heo * We don't wanna alloc/free wq_attrs for each wq for each CPU. 44814c16bd32STejun Heo * Let's use a preallocated one. The following buf is protected by 44824c16bd32STejun Heo * CPU hotplug exclusion. 44834c16bd32STejun Heo */ 4484fef59c9cSTejun Heo target_attrs = wq_update_pod_attrs_buf; 44854c16bd32STejun Heo 44864c16bd32STejun Heo copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 44870f36ee24STejun Heo wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask); 44884c16bd32STejun Heo 4489636b927eSTejun Heo /* nothing to do if the target cpumask matches the current pwq */ 44909546b29eSTejun Heo wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); 4491f3c11cb2SGreg Kroah-Hartman pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu), 4492f3c11cb2SGreg Kroah-Hartman lockdep_is_held(&wq_pool_mutex)); 4493f3c11cb2SGreg Kroah-Hartman if (wqattrs_equal(target_attrs, pwq->pool->attrs)) 4494f7142ed4SLai Jiangshan return; 44954c16bd32STejun Heo 44964c16bd32STejun Heo /* create a new pwq */ 44974c16bd32STejun Heo pwq = alloc_unbound_pwq(wq, target_attrs); 44984c16bd32STejun Heo if (!pwq) { 4499fef59c9cSTejun Heo pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n", 45004c16bd32STejun Heo wq->name); 450177f300b1SDaeseok Youn goto use_dfl_pwq; 45024c16bd32STejun Heo } 45034c16bd32STejun Heo 4504f7142ed4SLai Jiangshan /* Install the new pwq. 
*/ 45054c16bd32STejun Heo mutex_lock(&wq->mutex); 4506636b927eSTejun Heo old_pwq = install_unbound_pwq(wq, cpu, pwq); 45074c16bd32STejun Heo goto out_unlock; 45084c16bd32STejun Heo 45094c16bd32STejun Heo use_dfl_pwq: 4510f7142ed4SLai Jiangshan mutex_lock(&wq->mutex); 4511f3c11cb2SGreg Kroah-Hartman raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); 4512f3c11cb2SGreg Kroah-Hartman get_pwq(wq->dfl_pwq); 4513f3c11cb2SGreg Kroah-Hartman raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); 4514f3c11cb2SGreg Kroah-Hartman old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq); 45154c16bd32STejun Heo out_unlock: 45164c16bd32STejun Heo mutex_unlock(&wq->mutex); 45174c16bd32STejun Heo put_pwq_unlocked(old_pwq); 45184c16bd32STejun Heo } 45194c16bd32STejun Heo 452030cdf249STejun Heo static int alloc_and_link_pwqs(struct workqueue_struct *wq) 45211da177e4SLinus Torvalds { 452249e3cf44STejun Heo bool highpri = wq->flags & WQ_HIGHPRI; 45238a2b7538STejun Heo int cpu, ret; 4524e1d8aa9fSFrederic Weisbecker 4525687a9aa5STejun Heo wq->cpu_pwq = alloc_percpu(struct pool_workqueue *); 4526ee1ceef7STejun Heo if (!wq->cpu_pwq) 4527687a9aa5STejun Heo goto enomem; 452830cdf249STejun Heo 4529636b927eSTejun Heo if (!(wq->flags & WQ_UNBOUND)) { 453030cdf249STejun Heo for_each_possible_cpu(cpu) { 4531687a9aa5STejun Heo struct pool_workqueue **pwq_p = 4532ee1ceef7STejun Heo per_cpu_ptr(wq->cpu_pwq, cpu); 4533687a9aa5STejun Heo struct worker_pool *pool = 4534687a9aa5STejun Heo &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]); 453530cdf249STejun Heo 4536687a9aa5STejun Heo *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, 4537687a9aa5STejun Heo pool->node); 4538687a9aa5STejun Heo if (!*pwq_p) 4539687a9aa5STejun Heo goto enomem; 4540687a9aa5STejun Heo 4541687a9aa5STejun Heo init_pwq(*pwq_p, wq, pool); 4542f147f29eSTejun Heo 4543f147f29eSTejun Heo mutex_lock(&wq->mutex); 4544687a9aa5STejun Heo link_pwq(*pwq_p); 4545f147f29eSTejun Heo mutex_unlock(&wq->mutex); 454630cdf249STejun Heo } 454730cdf249STejun Heo return 0; 4548509b3204SDaniel Jordan } 4549509b3204SDaniel Jordan 4550ffd8bea8SSebastian Andrzej Siewior cpus_read_lock(); 4551509b3204SDaniel Jordan if (wq->flags & __WQ_ORDERED) { 45528a2b7538STejun Heo ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 45538a2b7538STejun Heo /* there should be only a single pwq for the ordering guarantee */ 4554f3c11cb2SGreg Kroah-Hartman WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 4555f3c11cb2SGreg Kroah-Hartman wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 45568a2b7538STejun Heo "ordering guarantee broken for workqueue %s\n", wq->name); 45579e8cd2f5STejun Heo } else { 4558509b3204SDaniel Jordan ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 45599e8cd2f5STejun Heo } 4560ffd8bea8SSebastian Andrzej Siewior cpus_read_unlock(); 4561509b3204SDaniel Jordan 456264344553SZqiang /* For unbound pwqs, flushing pwq_release_worker ensures that 456364344553SZqiang * pwq_release_workfn() completes before kfree(wq) is called.
456464344553SZqiang */ 456564344553SZqiang if (ret) 456664344553SZqiang kthread_flush_worker(pwq_release_worker); 456764344553SZqiang 4568509b3204SDaniel Jordan return ret; 4569687a9aa5STejun Heo 4570687a9aa5STejun Heo enomem: 4571687a9aa5STejun Heo if (wq->cpu_pwq) { 45727b42f401SZqiang for_each_possible_cpu(cpu) { 45737b42f401SZqiang struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 45747b42f401SZqiang 45757b42f401SZqiang if (pwq) 45767b42f401SZqiang kmem_cache_free(pwq_cache, pwq); 45777b42f401SZqiang } 4578687a9aa5STejun Heo free_percpu(wq->cpu_pwq); 4579687a9aa5STejun Heo wq->cpu_pwq = NULL; 4580687a9aa5STejun Heo } 4581687a9aa5STejun Heo return -ENOMEM; 45820f900049STejun Heo } 45830f900049STejun Heo 4584f3421797STejun Heo static int wq_clamp_max_active(int max_active, unsigned int flags, 4585f3421797STejun Heo const char *name) 4586b71ab8c2STejun Heo { 4587636b927eSTejun Heo if (max_active < 1 || max_active > WQ_MAX_ACTIVE) 4588044c782cSValentin Ilie pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 4589636b927eSTejun Heo max_active, name, 1, WQ_MAX_ACTIVE); 4590b71ab8c2STejun Heo 4591636b927eSTejun Heo return clamp_val(max_active, 1, WQ_MAX_ACTIVE); 4592b71ab8c2STejun Heo } 4593b71ab8c2STejun Heo 4594983c7515STejun Heo /* 4595983c7515STejun Heo * Workqueues which may be used during memory reclaim should have a rescuer 4596983c7515STejun Heo * to guarantee forward progress. 4597983c7515STejun Heo */ 4598983c7515STejun Heo static int init_rescuer(struct workqueue_struct *wq) 4599983c7515STejun Heo { 4600983c7515STejun Heo struct worker *rescuer; 4601b92b36eaSDan Carpenter int ret; 4602983c7515STejun Heo 4603983c7515STejun Heo if (!(wq->flags & WQ_MEM_RECLAIM)) 4604983c7515STejun Heo return 0; 4605983c7515STejun Heo 4606983c7515STejun Heo rescuer = alloc_worker(NUMA_NO_NODE); 46074c0736a7SPetr Mladek if (!rescuer) { 46084c0736a7SPetr Mladek pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n", 46094c0736a7SPetr Mladek wq->name); 4610983c7515STejun Heo return -ENOMEM; 46114c0736a7SPetr Mladek } 4612983c7515STejun Heo 4613983c7515STejun Heo rescuer->rescue_wq = wq; 4614b6a46f72SAaron Tomlin rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name); 4615f187b697SSean Fu if (IS_ERR(rescuer->task)) { 4616b92b36eaSDan Carpenter ret = PTR_ERR(rescuer->task); 46174c0736a7SPetr Mladek pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe", 46184c0736a7SPetr Mladek wq->name, ERR_PTR(ret)); 4619983c7515STejun Heo kfree(rescuer); 4620b92b36eaSDan Carpenter return ret; 4621983c7515STejun Heo } 4622983c7515STejun Heo 4623983c7515STejun Heo wq->rescuer = rescuer; 4624983c7515STejun Heo kthread_bind_mask(rescuer->task, cpu_possible_mask); 4625983c7515STejun Heo wake_up_process(rescuer->task); 4626983c7515STejun Heo 4627983c7515STejun Heo return 0; 4628983c7515STejun Heo } 4629983c7515STejun Heo 463082e098f5STejun Heo /** 463182e098f5STejun Heo * wq_adjust_max_active - update a wq's max_active to the current setting 463282e098f5STejun Heo * @wq: target workqueue 463382e098f5STejun Heo * 463482e098f5STejun Heo * If @wq isn't freezing, set @wq->max_active to the saved_max_active and 463582e098f5STejun Heo * activate inactive work items accordingly. If @wq is freezing, clear 463682e098f5STejun Heo * @wq->max_active to zero. 
463782e098f5STejun Heo */ 463882e098f5STejun Heo static void wq_adjust_max_active(struct workqueue_struct *wq) 463982e098f5STejun Heo { 4640e3ee73b5SGreg Kroah-Hartman struct pool_workqueue *pwq; 464182e098f5STejun Heo 464282e098f5STejun Heo lockdep_assert_held(&wq->mutex); 464382e098f5STejun Heo 464482e098f5STejun Heo if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) { 46456741dd3fSGreg Kroah-Hartman WRITE_ONCE(wq->max_active, 0); 46466741dd3fSGreg Kroah-Hartman return; 464782e098f5STejun Heo } 464882e098f5STejun Heo 46496741dd3fSGreg Kroah-Hartman if (wq->max_active == wq->saved_max_active) 465082e098f5STejun Heo return; 465182e098f5STejun Heo 465282e098f5STejun Heo /* 46536741dd3fSGreg Kroah-Hartman * Update @wq->max_active and then kick inactive work items if more 465482e098f5STejun Heo * active work items are allowed. This doesn't break work item ordering 465582e098f5STejun Heo * because new work items are always queued behind existing inactive 465682e098f5STejun Heo * work items if there are any. 465782e098f5STejun Heo */ 46586741dd3fSGreg Kroah-Hartman WRITE_ONCE(wq->max_active, wq->saved_max_active); 465982e098f5STejun Heo 466082e098f5STejun Heo for_each_pwq(pwq, wq) { 466182e098f5STejun Heo unsigned long flags; 466282e098f5STejun Heo 4663e3ee73b5SGreg Kroah-Hartman /* this function can be called during early boot w/ irq disabled */ 466482e098f5STejun Heo raw_spin_lock_irqsave(&pwq->pool->lock, flags); 4665e3ee73b5SGreg Kroah-Hartman 46665debbff9SGreg Kroah-Hartman while (!list_empty(&pwq->inactive_works) && 46675debbff9SGreg Kroah-Hartman pwq->nr_active < wq->max_active) 46685debbff9SGreg Kroah-Hartman pwq_activate_first_inactive(pwq); 4669e3ee73b5SGreg Kroah-Hartman 467082e098f5STejun Heo kick_pool(pwq->pool); 4671e3ee73b5SGreg Kroah-Hartman 467282e098f5STejun Heo raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 467382e098f5STejun Heo } 467482e098f5STejun Heo } 467582e098f5STejun Heo 4676a2775bbcSMathieu Malaterre __printf(1, 4) 4677669de8bdSBart Van Assche struct workqueue_struct *alloc_workqueue(const char *fmt, 467897e37d7bSTejun Heo unsigned int flags, 4679669de8bdSBart Van Assche int max_active, ...) 46803af24433SOleg Nesterov { 4681ecf6881fSTejun Heo va_list args; 46823af24433SOleg Nesterov struct workqueue_struct *wq; 4683bfb429f3SGreg Kroah-Hartman int len; 4684b196be89STejun Heo 46855c0338c6STejun Heo /* 4686fef59c9cSTejun Heo * Unbound && max_active == 1 used to imply ordered, which is no longer 4687fef59c9cSTejun Heo * the case on many machines due to per-pod pools. While 46885c0338c6STejun Heo * alloc_ordered_workqueue() is the right way to create an ordered 4689fef59c9cSTejun Heo * workqueue, keep the previous behavior to avoid subtle breakages. 
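 *
 * Illustrative aside (not part of this file): a caller that actually needs
 * the ordering guarantee should request it explicitly with
 * alloc_ordered_workqueue("name", 0) instead of relying on
 * alloc_workqueue("name", WQ_UNBOUND, 1).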
46905c0338c6STejun Heo */ 46915c0338c6STejun Heo if ((flags & WQ_UNBOUND) && max_active == 1) 46925c0338c6STejun Heo flags |= __WQ_ORDERED; 46935c0338c6STejun Heo 4694cee22a15SViresh Kumar /* see the comment above the definition of WQ_POWER_EFFICIENT */ 4695cee22a15SViresh Kumar if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 4696cee22a15SViresh Kumar flags |= WQ_UNBOUND; 4697cee22a15SViresh Kumar 4698ecf6881fSTejun Heo /* allocate wq and format name */ 4699bfb429f3SGreg Kroah-Hartman wq = kzalloc(sizeof(*wq), GFP_KERNEL); 4700b196be89STejun Heo if (!wq) 4701d2c1d404STejun Heo return NULL; 4702b196be89STejun Heo 47036029a918STejun Heo if (flags & WQ_UNBOUND) { 4704be69d00dSThomas Gleixner wq->unbound_attrs = alloc_workqueue_attrs(); 47056029a918STejun Heo if (!wq->unbound_attrs) 47066029a918STejun Heo goto err_free_wq; 47076029a918STejun Heo } 47086029a918STejun Heo 4709669de8bdSBart Van Assche va_start(args, max_active); 4710bfb429f3SGreg Kroah-Hartman len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); 4711b196be89STejun Heo va_end(args); 47123af24433SOleg Nesterov 4713bfb429f3SGreg Kroah-Hartman if (len >= WQ_NAME_LEN) 4714bfb429f3SGreg Kroah-Hartman pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", wq->name); 471543a181f8SAudra Mitchell 4716d320c038STejun Heo max_active = max_active ?: WQ_DFL_ACTIVE; 4717b196be89STejun Heo max_active = wq_clamp_max_active(max_active, flags, wq->name); 47183af24433SOleg Nesterov 4719b196be89STejun Heo /* init wq */ 472097e37d7bSTejun Heo wq->flags = flags; 472182e098f5STejun Heo wq->max_active = max_active; 47226741dd3fSGreg Kroah-Hartman wq->saved_max_active = max_active; 47233c25a55dSLai Jiangshan mutex_init(&wq->mutex); 4724112202d9STejun Heo atomic_set(&wq->nr_pwqs_to_flush, 0); 472530cdf249STejun Heo INIT_LIST_HEAD(&wq->pwqs); 472673f53c4aSTejun Heo INIT_LIST_HEAD(&wq->flusher_queue); 472773f53c4aSTejun Heo INIT_LIST_HEAD(&wq->flusher_overflow); 4728493a1724STejun Heo INIT_LIST_HEAD(&wq->maydays); 47293af24433SOleg Nesterov 4730669de8bdSBart Van Assche wq_init_lockdep(wq); 4731cce1a165SOleg Nesterov INIT_LIST_HEAD(&wq->list); 47323af24433SOleg Nesterov 4733b522229aSTejun Heo if (alloc_and_link_pwqs(wq) < 0) 4734bfb429f3SGreg Kroah-Hartman goto err_unreg_lockdep; 47351537663fSTejun Heo 473640c17f75STejun Heo if (wq_online && init_rescuer(wq) < 0) 4737d2c1d404STejun Heo goto err_destroy; 4738e22bee78STejun Heo 4739226223abSTejun Heo if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 4740226223abSTejun Heo goto err_destroy; 4741226223abSTejun Heo 47426af8bf3dSOleg Nesterov /* 474368e13a67SLai Jiangshan * wq_pool_mutex protects global freeze state and workqueues list. 474468e13a67SLai Jiangshan * Grab it, adjust max_active and add the new @wq to workqueues 474568e13a67SLai Jiangshan * list. 
47466af8bf3dSOleg Nesterov */ 474768e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 4748a0a1a5fdSTejun Heo 4749a357fc03SLai Jiangshan mutex_lock(&wq->mutex); 475082e098f5STejun Heo wq_adjust_max_active(wq); 4751a357fc03SLai Jiangshan mutex_unlock(&wq->mutex); 4752a0a1a5fdSTejun Heo 4753e2dca7adSTejun Heo list_add_tail_rcu(&wq->list, &workqueues); 4754a0a1a5fdSTejun Heo 475568e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 47563af24433SOleg Nesterov 47573af24433SOleg Nesterov return wq; 4758d2c1d404STejun Heo 475982efcab3SBart Van Assche err_unreg_lockdep: 4760009bb421SBart Van Assche wq_unregister_lockdep(wq); 4761009bb421SBart Van Assche wq_free_lockdep(wq); 476282efcab3SBart Van Assche err_free_wq: 47636029a918STejun Heo free_workqueue_attrs(wq->unbound_attrs); 47644690c4abSTejun Heo kfree(wq); 4765d2c1d404STejun Heo return NULL; 4766d2c1d404STejun Heo err_destroy: 4767d2c1d404STejun Heo destroy_workqueue(wq); 47684690c4abSTejun Heo return NULL; 47691da177e4SLinus Torvalds } 4770669de8bdSBart Van Assche EXPORT_SYMBOL_GPL(alloc_workqueue); 47711da177e4SLinus Torvalds 4772c29eb853STejun Heo static bool pwq_busy(struct pool_workqueue *pwq) 4773c29eb853STejun Heo { 4774c29eb853STejun Heo int i; 4775c29eb853STejun Heo 4776c29eb853STejun Heo for (i = 0; i < WORK_NR_COLORS; i++) 4777c29eb853STejun Heo if (pwq->nr_in_flight[i]) 4778c29eb853STejun Heo return true; 4779c29eb853STejun Heo 4780f3c11cb2SGreg Kroah-Hartman if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) 4781c29eb853STejun Heo return true; 478235bf38ddSGreg Kroah-Hartman if (pwq->nr_active || !list_empty(&pwq->inactive_works)) 4783c29eb853STejun Heo return true; 4784c29eb853STejun Heo 4785c29eb853STejun Heo return false; 4786c29eb853STejun Heo } 4787c29eb853STejun Heo 47883af24433SOleg Nesterov /** 47893af24433SOleg Nesterov * destroy_workqueue - safely terminate a workqueue 47903af24433SOleg Nesterov * @wq: target workqueue 47913af24433SOleg Nesterov * 47923af24433SOleg Nesterov * Safely destroy a workqueue. All work currently pending will be done first. 47933af24433SOleg Nesterov */ 47943af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq) 47953af24433SOleg Nesterov { 479649e3cf44STejun Heo struct pool_workqueue *pwq; 4797636b927eSTejun Heo int cpu; 47983af24433SOleg Nesterov 4799def98c84STejun Heo /* 4800def98c84STejun Heo * Remove it from sysfs first so that sanity check failure doesn't 4801def98c84STejun Heo * lead to sysfs name conflicts. 
4802def98c84STejun Heo */ 4803def98c84STejun Heo workqueue_sysfs_unregister(wq); 4804def98c84STejun Heo 480533e3f0a3SRichard Clark /* mark the workqueue destruction is in progress */ 480633e3f0a3SRichard Clark mutex_lock(&wq->mutex); 480733e3f0a3SRichard Clark wq->flags |= __WQ_DESTROYING; 480833e3f0a3SRichard Clark mutex_unlock(&wq->mutex); 480933e3f0a3SRichard Clark 48109c5a2ba7STejun Heo /* drain it before proceeding with destruction */ 48119c5a2ba7STejun Heo drain_workqueue(wq); 4812c8efcc25STejun Heo 4813def98c84STejun Heo /* kill rescuer, if sanity checks fail, leave it w/o rescuer */ 4814def98c84STejun Heo if (wq->rescuer) { 4815def98c84STejun Heo struct worker *rescuer = wq->rescuer; 4816def98c84STejun Heo 4817def98c84STejun Heo /* this prevents new queueing */ 4818a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&wq_mayday_lock); 4819def98c84STejun Heo wq->rescuer = NULL; 4820a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&wq_mayday_lock); 4821def98c84STejun Heo 4822def98c84STejun Heo /* rescuer will empty maydays list before exiting */ 4823def98c84STejun Heo kthread_stop(rescuer->task); 48248efe1223STejun Heo kfree(rescuer); 4825def98c84STejun Heo } 4826def98c84STejun Heo 4827c29eb853STejun Heo /* 4828c29eb853STejun Heo * Sanity checks - grab all the locks so that we wait for all 4829c29eb853STejun Heo * in-flight operations which may do put_pwq(). 4830c29eb853STejun Heo */ 4831c29eb853STejun Heo mutex_lock(&wq_pool_mutex); 4832b09f4fd3SLai Jiangshan mutex_lock(&wq->mutex); 483349e3cf44STejun Heo for_each_pwq(pwq, wq) { 4834a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pwq->pool->lock); 4835c29eb853STejun Heo if (WARN_ON(pwq_busy(pwq))) { 48361d9a6159SKefeng Wang pr_warn("%s: %s has the following busy pwq\n", 4837e66b39afSTejun Heo __func__, wq->name); 4838c29eb853STejun Heo show_pwq(pwq); 4839a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pwq->pool->lock); 4840b09f4fd3SLai Jiangshan mutex_unlock(&wq->mutex); 4841c29eb853STejun Heo mutex_unlock(&wq_pool_mutex); 484255df0933SImran Khan show_one_workqueue(wq); 48436183c009STejun Heo return; 484476af4d93STejun Heo } 4845a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pwq->pool->lock); 484676af4d93STejun Heo } 4847b09f4fd3SLai Jiangshan mutex_unlock(&wq->mutex); 48486183c009STejun Heo 4849a0a1a5fdSTejun Heo /* 4850a0a1a5fdSTejun Heo * wq list is used to freeze wq, remove from list after 4851a0a1a5fdSTejun Heo * flushing is complete in case freeze races us. 4852a0a1a5fdSTejun Heo */ 4853e2dca7adSTejun Heo list_del_rcu(&wq->list); 485468e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 48553af24433SOleg Nesterov 48568864b4e5STejun Heo /* 4857636b927eSTejun Heo * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq 4858636b927eSTejun Heo * to put the base refs. @wq will be auto-destroyed from the last 4859636b927eSTejun Heo * pwq_put. RCU read lock prevents @wq from going away from under us. 
48608864b4e5STejun Heo */ 4861636b927eSTejun Heo rcu_read_lock(); 4862636b927eSTejun Heo 4863636b927eSTejun Heo for_each_possible_cpu(cpu) { 4864f3c11cb2SGreg Kroah-Hartman pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4865f3c11cb2SGreg Kroah-Hartman RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL); 4866f3c11cb2SGreg Kroah-Hartman put_pwq_unlocked(pwq); 48674c16bd32STejun Heo } 48684c16bd32STejun Heo 4869f3c11cb2SGreg Kroah-Hartman put_pwq_unlocked(wq->dfl_pwq); 4870f3c11cb2SGreg Kroah-Hartman wq->dfl_pwq = NULL; 4871636b927eSTejun Heo 4872636b927eSTejun Heo rcu_read_unlock(); 48733af24433SOleg Nesterov } 48743af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue); 48753af24433SOleg Nesterov 4876dcd989cbSTejun Heo /** 4877dcd989cbSTejun Heo * workqueue_set_max_active - adjust max_active of a workqueue 4878dcd989cbSTejun Heo * @wq: target workqueue 4879dcd989cbSTejun Heo * @max_active: new max_active value. 4880dcd989cbSTejun Heo * 48816741dd3fSGreg Kroah-Hartman * Set max_active of @wq to @max_active. 4882dcd989cbSTejun Heo * 4883dcd989cbSTejun Heo * CONTEXT: 4884dcd989cbSTejun Heo * Don't call from IRQ context. 4885dcd989cbSTejun Heo */ 4886dcd989cbSTejun Heo void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 4887dcd989cbSTejun Heo { 48888719dceaSTejun Heo /* disallow meddling with max_active for ordered workqueues */ 48890a94efb5STejun Heo if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 48908719dceaSTejun Heo return; 48918719dceaSTejun Heo 4892f3421797STejun Heo max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 4893dcd989cbSTejun Heo 4894a357fc03SLai Jiangshan mutex_lock(&wq->mutex); 4895dcd989cbSTejun Heo 48960a94efb5STejun Heo wq->flags &= ~__WQ_ORDERED; 4897dcd989cbSTejun Heo wq->saved_max_active = max_active; 489882e098f5STejun Heo wq_adjust_max_active(wq); 4899dcd989cbSTejun Heo 4900a357fc03SLai Jiangshan mutex_unlock(&wq->mutex); 4901dcd989cbSTejun Heo } 4902dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4903dcd989cbSTejun Heo 4904dcd989cbSTejun Heo /** 490527d4ee03SLukas Wunner * current_work - retrieve %current task's work struct 490627d4ee03SLukas Wunner * 490727d4ee03SLukas Wunner * Determine if %current task is a workqueue worker and what it's working on. 490827d4ee03SLukas Wunner * Useful to find out the context that the %current task is running in. 490927d4ee03SLukas Wunner * 491027d4ee03SLukas Wunner * Return: work struct if %current task is a workqueue worker, %NULL otherwise. 491127d4ee03SLukas Wunner */ 491227d4ee03SLukas Wunner struct work_struct *current_work(void) 491327d4ee03SLukas Wunner { 491427d4ee03SLukas Wunner struct worker *worker = current_wq_worker(); 491527d4ee03SLukas Wunner 491627d4ee03SLukas Wunner return worker ? worker->current_work : NULL; 491727d4ee03SLukas Wunner } 491827d4ee03SLukas Wunner EXPORT_SYMBOL(current_work); 491927d4ee03SLukas Wunner 492027d4ee03SLukas Wunner /** 4921e6267616STejun Heo * current_is_workqueue_rescuer - is %current workqueue rescuer? 4922e6267616STejun Heo * 4923e6267616STejun Heo * Determine whether %current is a workqueue rescuer. Can be used from 4924e6267616STejun Heo * work functions to determine whether it's being run off the rescuer task. 4925d185af30SYacine Belkadi * 4926d185af30SYacine Belkadi * Return: %true if %current is a workqueue rescuer. %false otherwise. 
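 *
 * A minimal, hypothetical sketch (not taken from this file) of a work
 * function that skips optional work when executed by the rescuer;
 * my_work_fn(), do_optional_prefetch() and do_required_part() are
 * illustrative names only:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		if (!current_is_workqueue_rescuer())
 *			do_optional_prefetch();	// skip extras under memory pressure
 *		do_required_part(work);		// always do the mandatory part
 *	}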
4927e6267616STejun Heo */ 4928e6267616STejun Heo bool current_is_workqueue_rescuer(void) 4929e6267616STejun Heo { 4930e6267616STejun Heo struct worker *worker = current_wq_worker(); 4931e6267616STejun Heo 49326a092dfdSLai Jiangshan return worker && worker->rescue_wq; 4933e6267616STejun Heo } 4934e6267616STejun Heo 4935e6267616STejun Heo /** 4936dcd989cbSTejun Heo * workqueue_congested - test whether a workqueue is congested 4937dcd989cbSTejun Heo * @cpu: CPU in question 4938dcd989cbSTejun Heo * @wq: target workqueue 4939dcd989cbSTejun Heo * 4940dcd989cbSTejun Heo * Test whether @wq's cpu workqueue for @cpu is congested. There is 4941dcd989cbSTejun Heo * no synchronization around this function and the test result is 4942dcd989cbSTejun Heo * unreliable and only useful as advisory hints or for debugging. 4943dcd989cbSTejun Heo * 4944d3251859STejun Heo * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 4945636b927eSTejun Heo * 4946636b927eSTejun Heo * With the exception of ordered workqueues, all workqueues have per-cpu 4947636b927eSTejun Heo * pool_workqueues, each with its own congested state. A workqueue being 4948636b927eSTejun Heo * congested on one CPU doesn't mean that the workqueue is congested on any 4949636b927eSTejun Heo * other CPUs. 4950d3251859STejun Heo * 4951d185af30SYacine Belkadi * Return: 4952dcd989cbSTejun Heo * %true if congested, %false otherwise. 4953dcd989cbSTejun Heo */ 4954d84ff051STejun Heo bool workqueue_congested(int cpu, struct workqueue_struct *wq) 4955dcd989cbSTejun Heo { 49567fb98ea7STejun Heo struct pool_workqueue *pwq; 495776af4d93STejun Heo bool ret; 495876af4d93STejun Heo 495924acfb71SThomas Gleixner rcu_read_lock(); 496024acfb71SThomas Gleixner preempt_disable(); 49617fb98ea7STejun Heo 4962d3251859STejun Heo if (cpu == WORK_CPU_UNBOUND) 4963d3251859STejun Heo cpu = smp_processor_id(); 4964d3251859STejun Heo 4965687a9aa5STejun Heo pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 4966f97a4a1aSLai Jiangshan ret = !list_empty(&pwq->inactive_works); 4967636b927eSTejun Heo 496824acfb71SThomas Gleixner preempt_enable(); 496924acfb71SThomas Gleixner rcu_read_unlock(); 497076af4d93STejun Heo 497176af4d93STejun Heo return ret; 4972dcd989cbSTejun Heo } 4973dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_congested); 4974dcd989cbSTejun Heo 4975dcd989cbSTejun Heo /** 4976dcd989cbSTejun Heo * work_busy - test whether a work is currently pending or running 4977dcd989cbSTejun Heo * @work: the work to be tested 4978dcd989cbSTejun Heo * 4979dcd989cbSTejun Heo * Test whether @work is currently pending or running. There is no 4980dcd989cbSTejun Heo * synchronization around this function and the test result is 4981dcd989cbSTejun Heo * unreliable and only useful as advisory hints or for debugging. 4982dcd989cbSTejun Heo * 4983d185af30SYacine Belkadi * Return: 4984dcd989cbSTejun Heo * OR'd bitmask of WORK_BUSY_* bits.
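 *
 * A hedged usage sketch (my_work is a hypothetical work item):
 *
 *	unsigned int busy = work_busy(&my_work);
 *
 *	if (busy & WORK_BUSY_PENDING)
 *		pr_debug("my_work is queued but not yet running\n");
 *	if (busy & WORK_BUSY_RUNNING)
 *		pr_debug("my_work is executing right now\n");
 *
 * Because the result is only advisory, it must not be relied upon for
 * synchronization; use flush_work() or cancel_work_sync() for that.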
4985dcd989cbSTejun Heo */ 4986dcd989cbSTejun Heo unsigned int work_busy(struct work_struct *work) 4987dcd989cbSTejun Heo { 4988fa1b54e6STejun Heo struct worker_pool *pool; 4989dcd989cbSTejun Heo unsigned long flags; 4990dcd989cbSTejun Heo unsigned int ret = 0; 4991dcd989cbSTejun Heo 4992dcd989cbSTejun Heo if (work_pending(work)) 4993dcd989cbSTejun Heo ret |= WORK_BUSY_PENDING; 4994038366c5SLai Jiangshan 499524acfb71SThomas Gleixner rcu_read_lock(); 4996fa1b54e6STejun Heo pool = get_work_pool(work); 4997038366c5SLai Jiangshan if (pool) { 4998a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irqsave(&pool->lock, flags); 4999c9e7cf27STejun Heo if (find_worker_executing_work(pool, work)) 5000dcd989cbSTejun Heo ret |= WORK_BUSY_RUNNING; 5001a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irqrestore(&pool->lock, flags); 5002038366c5SLai Jiangshan } 500324acfb71SThomas Gleixner rcu_read_unlock(); 5004dcd989cbSTejun Heo 5005dcd989cbSTejun Heo return ret; 5006dcd989cbSTejun Heo } 5007dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_busy); 5008dcd989cbSTejun Heo 50093d1cb205STejun Heo /** 50103d1cb205STejun Heo * set_worker_desc - set description for the current work item 50113d1cb205STejun Heo * @fmt: printf-style format string 50123d1cb205STejun Heo * @...: arguments for the format string 50133d1cb205STejun Heo * 50143d1cb205STejun Heo * This function can be called by a running work function to describe what 50153d1cb205STejun Heo * the work item is about. If the worker task gets dumped, this 50163d1cb205STejun Heo * information will be printed out together to help debugging. The 50173d1cb205STejun Heo * description can be at most WORKER_DESC_LEN including the trailing '\0'. 50183d1cb205STejun Heo */ 50193d1cb205STejun Heo void set_worker_desc(const char *fmt, ...) 50203d1cb205STejun Heo { 50213d1cb205STejun Heo struct worker *worker = current_wq_worker(); 50223d1cb205STejun Heo va_list args; 50233d1cb205STejun Heo 50243d1cb205STejun Heo if (worker) { 50253d1cb205STejun Heo va_start(args, fmt); 50263d1cb205STejun Heo vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 50273d1cb205STejun Heo va_end(args); 50283d1cb205STejun Heo } 50293d1cb205STejun Heo } 50305c750d58SSteffen Maier EXPORT_SYMBOL_GPL(set_worker_desc); 50313d1cb205STejun Heo 50323d1cb205STejun Heo /** 50333d1cb205STejun Heo * print_worker_info - print out worker information and description 50343d1cb205STejun Heo * @log_lvl: the log level to use when printing 50353d1cb205STejun Heo * @task: target task 50363d1cb205STejun Heo * 50373d1cb205STejun Heo * If @task is a worker and currently executing a work item, print out the 50383d1cb205STejun Heo * name of the workqueue being serviced and worker description set with 50393d1cb205STejun Heo * set_worker_desc() by the currently executing work item. 50403d1cb205STejun Heo * 50413d1cb205STejun Heo * This function can be safely called on any task as long as the 50423d1cb205STejun Heo * task_struct itself is accessible. While safe, this function isn't 50433d1cb205STejun Heo * synchronized and may print out mixups or garbages of limited length. 
50443d1cb205STejun Heo */ 50453d1cb205STejun Heo void print_worker_info(const char *log_lvl, struct task_struct *task) 50463d1cb205STejun Heo { 50473d1cb205STejun Heo work_func_t *fn = NULL; 50483d1cb205STejun Heo char name[WQ_NAME_LEN] = { }; 50493d1cb205STejun Heo char desc[WORKER_DESC_LEN] = { }; 50503d1cb205STejun Heo struct pool_workqueue *pwq = NULL; 50513d1cb205STejun Heo struct workqueue_struct *wq = NULL; 50523d1cb205STejun Heo struct worker *worker; 50533d1cb205STejun Heo 50543d1cb205STejun Heo if (!(task->flags & PF_WQ_WORKER)) 50553d1cb205STejun Heo return; 50563d1cb205STejun Heo 50573d1cb205STejun Heo /* 50583d1cb205STejun Heo * This function is called without any synchronization and @task 50593d1cb205STejun Heo * could be in any state. Be careful with dereferences. 50603d1cb205STejun Heo */ 5061e700591aSPetr Mladek worker = kthread_probe_data(task); 50623d1cb205STejun Heo 50633d1cb205STejun Heo /* 50648bf89593STejun Heo * Carefully copy the associated workqueue's workfn, name and desc. 50658bf89593STejun Heo * Keep the original last '\0' in case the original is garbage. 50663d1cb205STejun Heo */ 5067fe557319SChristoph Hellwig copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); 5068fe557319SChristoph Hellwig copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); 5069fe557319SChristoph Hellwig copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); 5070fe557319SChristoph Hellwig copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); 5071fe557319SChristoph Hellwig copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); 50723d1cb205STejun Heo 50733d1cb205STejun Heo if (fn || name[0] || desc[0]) { 5074d75f773cSSakari Ailus printk("%sWorkqueue: %s %ps", log_lvl, name, fn); 50758bf89593STejun Heo if (strcmp(name, desc)) 50763d1cb205STejun Heo pr_cont(" (%s)", desc); 50773d1cb205STejun Heo pr_cont("\n"); 50783d1cb205STejun Heo } 50793d1cb205STejun Heo } 50803d1cb205STejun Heo 50813494fc30STejun Heo static void pr_cont_pool_info(struct worker_pool *pool) 50823494fc30STejun Heo { 50833494fc30STejun Heo pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 50843494fc30STejun Heo if (pool->node != NUMA_NO_NODE) 50853494fc30STejun Heo pr_cont(" node=%d", pool->node); 50863494fc30STejun Heo pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 50873494fc30STejun Heo } 50883494fc30STejun Heo 5089c76feb0dSPaul E. McKenney struct pr_cont_work_struct { 5090c76feb0dSPaul E. McKenney bool comma; 5091c76feb0dSPaul E. McKenney work_func_t func; 5092c76feb0dSPaul E. McKenney long ctr; 5093c76feb0dSPaul E. McKenney }; 5094c76feb0dSPaul E. McKenney 5095c76feb0dSPaul E. McKenney static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp) 5096c76feb0dSPaul E. McKenney { 5097c76feb0dSPaul E. McKenney if (!pcwsp->ctr) 5098c76feb0dSPaul E. McKenney goto out_record; 5099c76feb0dSPaul E. McKenney if (func == pcwsp->func) { 5100c76feb0dSPaul E. McKenney pcwsp->ctr++; 5101c76feb0dSPaul E. McKenney return; 5102c76feb0dSPaul E. McKenney } 5103c76feb0dSPaul E. McKenney if (pcwsp->ctr == 1) 5104c76feb0dSPaul E. McKenney pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func); 5105c76feb0dSPaul E. McKenney else 5106c76feb0dSPaul E. McKenney pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func); 5107c76feb0dSPaul E. McKenney pcwsp->ctr = 0; 5108c76feb0dSPaul E. McKenney out_record: 5109c76feb0dSPaul E. McKenney if ((long)func == -1L) 5110c76feb0dSPaul E. McKenney return; 5111c76feb0dSPaul E. 
McKenney pcwsp->comma = comma; 5112c76feb0dSPaul E. McKenney pcwsp->func = func; 5113c76feb0dSPaul E. McKenney pcwsp->ctr = 1; 5114c76feb0dSPaul E. McKenney } 5115c76feb0dSPaul E. McKenney 5116c76feb0dSPaul E. McKenney static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) 51173494fc30STejun Heo { 51183494fc30STejun Heo if (work->func == wq_barrier_func) { 51193494fc30STejun Heo struct wq_barrier *barr; 51203494fc30STejun Heo 51213494fc30STejun Heo barr = container_of(work, struct wq_barrier, work); 51223494fc30STejun Heo 5123c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 51243494fc30STejun Heo pr_cont("%s BAR(%d)", comma ? "," : "", 51253494fc30STejun Heo task_pid_nr(barr->task)); 51263494fc30STejun Heo } else { 5127c76feb0dSPaul E. McKenney if (!comma) 5128c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5129c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, work->func, pcwsp); 51303494fc30STejun Heo } 51313494fc30STejun Heo } 51323494fc30STejun Heo 51333494fc30STejun Heo static void show_pwq(struct pool_workqueue *pwq) 51343494fc30STejun Heo { 5135c76feb0dSPaul E. McKenney struct pr_cont_work_struct pcws = { .ctr = 0, }; 51363494fc30STejun Heo struct worker_pool *pool = pwq->pool; 51373494fc30STejun Heo struct work_struct *work; 51383494fc30STejun Heo struct worker *worker; 51393494fc30STejun Heo bool has_in_flight = false, has_pending = false; 51403494fc30STejun Heo int bkt; 51413494fc30STejun Heo 51423494fc30STejun Heo pr_info(" pwq %d:", pool->id); 51433494fc30STejun Heo pr_cont_pool_info(pool); 51443494fc30STejun Heo 514582e098f5STejun Heo pr_cont(" active=%d refcnt=%d%s\n", 514682e098f5STejun Heo pwq->nr_active, pwq->refcnt, 51473494fc30STejun Heo !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 51483494fc30STejun Heo 51493494fc30STejun Heo hash_for_each(pool->busy_hash, bkt, worker, hentry) { 51503494fc30STejun Heo if (worker->current_pwq == pwq) { 51513494fc30STejun Heo has_in_flight = true; 51523494fc30STejun Heo break; 51533494fc30STejun Heo } 51543494fc30STejun Heo } 51553494fc30STejun Heo if (has_in_flight) { 51563494fc30STejun Heo bool comma = false; 51573494fc30STejun Heo 51583494fc30STejun Heo pr_info(" in-flight:"); 51593494fc30STejun Heo hash_for_each(pool->busy_hash, bkt, worker, hentry) { 51603494fc30STejun Heo if (worker->current_pwq != pwq) 51613494fc30STejun Heo continue; 51623494fc30STejun Heo 5163d75f773cSSakari Ailus pr_cont("%s %d%s:%ps", comma ? "," : "", 51643494fc30STejun Heo task_pid_nr(worker->task), 516530ae2fc0STejun Heo worker->rescue_wq ? "(RESCUER)" : "", 51663494fc30STejun Heo worker->current_func); 51673494fc30STejun Heo list_for_each_entry(work, &worker->scheduled, entry) 5168c76feb0dSPaul E. McKenney pr_cont_work(false, work, &pcws); 5169c76feb0dSPaul E. 
McKenney pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 51703494fc30STejun Heo comma = true; 51713494fc30STejun Heo } 51723494fc30STejun Heo pr_cont("\n"); 51733494fc30STejun Heo } 51743494fc30STejun Heo 51753494fc30STejun Heo list_for_each_entry(work, &pool->worklist, entry) { 51763494fc30STejun Heo if (get_work_pwq(work) == pwq) { 51773494fc30STejun Heo has_pending = true; 51783494fc30STejun Heo break; 51793494fc30STejun Heo } 51803494fc30STejun Heo } 51813494fc30STejun Heo if (has_pending) { 51823494fc30STejun Heo bool comma = false; 51833494fc30STejun Heo 51843494fc30STejun Heo pr_info(" pending:"); 51853494fc30STejun Heo list_for_each_entry(work, &pool->worklist, entry) { 51863494fc30STejun Heo if (get_work_pwq(work) != pwq) 51873494fc30STejun Heo continue; 51883494fc30STejun Heo 5189c76feb0dSPaul E. McKenney pr_cont_work(comma, work, &pcws); 51903494fc30STejun Heo comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 51913494fc30STejun Heo } 5192c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 51933494fc30STejun Heo pr_cont("\n"); 51943494fc30STejun Heo } 51953494fc30STejun Heo 5196f97a4a1aSLai Jiangshan if (!list_empty(&pwq->inactive_works)) { 51973494fc30STejun Heo bool comma = false; 51983494fc30STejun Heo 5199f97a4a1aSLai Jiangshan pr_info(" inactive:"); 5200f97a4a1aSLai Jiangshan list_for_each_entry(work, &pwq->inactive_works, entry) { 5201c76feb0dSPaul E. McKenney pr_cont_work(comma, work, &pcws); 52023494fc30STejun Heo comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 52033494fc30STejun Heo } 5204c76feb0dSPaul E. McKenney pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 52053494fc30STejun Heo pr_cont("\n"); 52063494fc30STejun Heo } 52073494fc30STejun Heo } 52083494fc30STejun Heo 52093494fc30STejun Heo /** 521055df0933SImran Khan * show_one_workqueue - dump state of specified workqueue 521155df0933SImran Khan * @wq: workqueue whose state will be printed 52123494fc30STejun Heo */ 521355df0933SImran Khan void show_one_workqueue(struct workqueue_struct *wq) 52143494fc30STejun Heo { 52153494fc30STejun Heo struct pool_workqueue *pwq; 52163494fc30STejun Heo bool idle = true; 521755df0933SImran Khan unsigned long flags; 52183494fc30STejun Heo 52193494fc30STejun Heo for_each_pwq(pwq, wq) { 522035bf38ddSGreg Kroah-Hartman if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 52213494fc30STejun Heo idle = false; 52223494fc30STejun Heo break; 52233494fc30STejun Heo } 52243494fc30STejun Heo } 522555df0933SImran Khan if (idle) /* Nothing to print for idle workqueue */ 522655df0933SImran Khan return; 52273494fc30STejun Heo 52283494fc30STejun Heo pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 52293494fc30STejun Heo 52303494fc30STejun Heo for_each_pwq(pwq, wq) { 5231a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irqsave(&pwq->pool->lock, flags); 523235bf38ddSGreg Kroah-Hartman if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 523357116ce1SJohan Hovold /* 523457116ce1SJohan Hovold * Defer printing to avoid deadlocks in console 523557116ce1SJohan Hovold * drivers that queue work while holding locks 523657116ce1SJohan Hovold * also taken in their write paths. 
523757116ce1SJohan Hovold */ 523857116ce1SJohan Hovold printk_deferred_enter(); 52393494fc30STejun Heo show_pwq(pwq); 524057116ce1SJohan Hovold printk_deferred_exit(); 524157116ce1SJohan Hovold } 5242a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 524362635ea8SSergey Senozhatsky /* 524462635ea8SSergey Senozhatsky * We could be printing a lot from atomic context, e.g. 524555df0933SImran Khan * sysrq-t -> show_all_workqueues(). Avoid triggering 524662635ea8SSergey Senozhatsky * hard lockup. 524762635ea8SSergey Senozhatsky */ 524862635ea8SSergey Senozhatsky touch_nmi_watchdog(); 52493494fc30STejun Heo } 525055df0933SImran Khan 52513494fc30STejun Heo } 52523494fc30STejun Heo 525355df0933SImran Khan /** 525455df0933SImran Khan * show_one_worker_pool - dump state of specified worker pool 525555df0933SImran Khan * @pool: worker pool whose state will be printed 525655df0933SImran Khan */ 525755df0933SImran Khan static void show_one_worker_pool(struct worker_pool *pool) 525855df0933SImran Khan { 52593494fc30STejun Heo struct worker *worker; 52603494fc30STejun Heo bool first = true; 526155df0933SImran Khan unsigned long flags; 5262335a42ebSPetr Mladek unsigned long hung = 0; 52633494fc30STejun Heo 5264a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irqsave(&pool->lock, flags); 52653494fc30STejun Heo if (pool->nr_workers == pool->nr_idle) 52663494fc30STejun Heo goto next_pool; 5267335a42ebSPetr Mladek 5268335a42ebSPetr Mladek /* How long the first pending work is waiting for a worker. */ 5269335a42ebSPetr Mladek if (!list_empty(&pool->worklist)) 5270335a42ebSPetr Mladek hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 5271335a42ebSPetr Mladek 527257116ce1SJohan Hovold /* 527357116ce1SJohan Hovold * Defer printing to avoid deadlocks in console drivers that 527457116ce1SJohan Hovold * queue work while holding locks also taken in their write 527557116ce1SJohan Hovold * paths. 527657116ce1SJohan Hovold */ 527757116ce1SJohan Hovold printk_deferred_enter(); 52783494fc30STejun Heo pr_info("pool %d:", pool->id); 52793494fc30STejun Heo pr_cont_pool_info(pool); 5280335a42ebSPetr Mladek pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); 52813494fc30STejun Heo if (pool->manager) 52823494fc30STejun Heo pr_cont(" manager: %d", 52833494fc30STejun Heo task_pid_nr(pool->manager->task)); 52843494fc30STejun Heo list_for_each_entry(worker, &pool->idle_list, entry) { 52853494fc30STejun Heo pr_cont(" %s%d", first ? "idle: " : "", 52863494fc30STejun Heo task_pid_nr(worker->task)); 52873494fc30STejun Heo first = false; 52883494fc30STejun Heo } 52893494fc30STejun Heo pr_cont("\n"); 529057116ce1SJohan Hovold printk_deferred_exit(); 52913494fc30STejun Heo next_pool: 5292a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irqrestore(&pool->lock, flags); 529362635ea8SSergey Senozhatsky /* 529462635ea8SSergey Senozhatsky * We could be printing a lot from atomic context, e.g. 529555df0933SImran Khan * sysrq-t -> show_all_workqueues(). Avoid triggering 529662635ea8SSergey Senozhatsky * hard lockup. 529762635ea8SSergey Senozhatsky */ 529862635ea8SSergey Senozhatsky touch_nmi_watchdog(); 529955df0933SImran Khan 53003494fc30STejun Heo } 53013494fc30STejun Heo 530255df0933SImran Khan /** 530355df0933SImran Khan * show_all_workqueues - dump workqueue state 530455df0933SImran Khan * 5305704bc669SJungseung Lee * Called from a sysrq handler and prints out all busy workqueues and pools. 
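 *
 * (As noted elsewhere in this file, this is typically reached via sysrq-t,
 * e.g. "echo t > /proc/sysrq-trigger" on a machine with sysrq enabled.)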
530655df0933SImran Khan */ 530755df0933SImran Khan void show_all_workqueues(void) 530855df0933SImran Khan { 530955df0933SImran Khan struct workqueue_struct *wq; 531055df0933SImran Khan struct worker_pool *pool; 531155df0933SImran Khan int pi; 531255df0933SImran Khan 531355df0933SImran Khan rcu_read_lock(); 531455df0933SImran Khan 531555df0933SImran Khan pr_info("Showing busy workqueues and worker pools:\n"); 531655df0933SImran Khan 531755df0933SImran Khan list_for_each_entry_rcu(wq, &workqueues, list) 531855df0933SImran Khan show_one_workqueue(wq); 531955df0933SImran Khan 532055df0933SImran Khan for_each_pool(pool, pi) 532155df0933SImran Khan show_one_worker_pool(pool); 532255df0933SImran Khan 532324acfb71SThomas Gleixner rcu_read_unlock(); 53243494fc30STejun Heo } 53253494fc30STejun Heo 5326704bc669SJungseung Lee /** 5327704bc669SJungseung Lee * show_freezable_workqueues - dump freezable workqueue state 5328704bc669SJungseung Lee * 5329704bc669SJungseung Lee * Called from try_to_freeze_tasks() and prints out all freezable workqueues 5330704bc669SJungseung Lee * still busy. 5331704bc669SJungseung Lee */ 5332704bc669SJungseung Lee void show_freezable_workqueues(void) 5333704bc669SJungseung Lee { 5334704bc669SJungseung Lee struct workqueue_struct *wq; 5335704bc669SJungseung Lee 5336704bc669SJungseung Lee rcu_read_lock(); 5337704bc669SJungseung Lee 5338704bc669SJungseung Lee pr_info("Showing freezable workqueues that are still busy:\n"); 5339704bc669SJungseung Lee 5340704bc669SJungseung Lee list_for_each_entry_rcu(wq, &workqueues, list) { 5341704bc669SJungseung Lee if (!(wq->flags & WQ_FREEZABLE)) 5342704bc669SJungseung Lee continue; 5343704bc669SJungseung Lee show_one_workqueue(wq); 5344704bc669SJungseung Lee } 5345704bc669SJungseung Lee 5346704bc669SJungseung Lee rcu_read_unlock(); 5347704bc669SJungseung Lee } 5348704bc669SJungseung Lee 53496b59808bSTejun Heo /* used to show worker information through /proc/PID/{comm,stat,status} */ 53506b59808bSTejun Heo void wq_worker_comm(char *buf, size_t size, struct task_struct *task) 53516b59808bSTejun Heo { 53526b59808bSTejun Heo int off; 53536b59808bSTejun Heo 53546b59808bSTejun Heo /* always show the actual comm */ 53556b59808bSTejun Heo off = strscpy(buf, task->comm, size); 53566b59808bSTejun Heo if (off < 0) 53576b59808bSTejun Heo return; 53586b59808bSTejun Heo 5359197f6accSTejun Heo /* stabilize PF_WQ_WORKER and worker pool association */ 53606b59808bSTejun Heo mutex_lock(&wq_pool_attach_mutex); 53616b59808bSTejun Heo 5362197f6accSTejun Heo if (task->flags & PF_WQ_WORKER) { 5363197f6accSTejun Heo struct worker *worker = kthread_data(task); 5364197f6accSTejun Heo struct worker_pool *pool = worker->pool; 53656b59808bSTejun Heo 53666b59808bSTejun Heo if (pool) { 5367a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 53686b59808bSTejun Heo /* 5369197f6accSTejun Heo * ->desc tracks information (wq name or 5370197f6accSTejun Heo * set_worker_desc()) for the latest execution. If 5371197f6accSTejun Heo * current, prepend '+', otherwise '-'. 
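 *
 * For example (illustrative values only), a kworker currently running an
 * "events" work item would typically read as "kworker/0:1+events".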
53726b59808bSTejun Heo */ 53736b59808bSTejun Heo if (worker->desc[0] != '\0') { 53746b59808bSTejun Heo if (worker->current_work) 53756b59808bSTejun Heo scnprintf(buf + off, size - off, "+%s", 53766b59808bSTejun Heo worker->desc); 53776b59808bSTejun Heo else 53786b59808bSTejun Heo scnprintf(buf + off, size - off, "-%s", 53796b59808bSTejun Heo worker->desc); 53806b59808bSTejun Heo } 5381a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 53826b59808bSTejun Heo } 5383197f6accSTejun Heo } 53846b59808bSTejun Heo 53856b59808bSTejun Heo mutex_unlock(&wq_pool_attach_mutex); 53866b59808bSTejun Heo } 53876b59808bSTejun Heo 538866448bc2SMathieu Malaterre #ifdef CONFIG_SMP 538966448bc2SMathieu Malaterre 5390db7bccf4STejun Heo /* 5391db7bccf4STejun Heo * CPU hotplug. 5392db7bccf4STejun Heo * 5393e22bee78STejun Heo * There are two challenges in supporting CPU hotplug. Firstly, there 5394112202d9STejun Heo * are a lot of assumptions on strong associations among work, pwq and 5395706026c2STejun Heo * pool which make migrating pending and scheduled works very 5396e22bee78STejun Heo * difficult to implement without impacting hot paths. Secondly, 539794cf58bbSTejun Heo * worker pools serve mix of short, long and very long running works making 5398e22bee78STejun Heo * blocked draining impractical. 5399e22bee78STejun Heo * 540024647570STejun Heo * This is solved by allowing the pools to be disassociated from the CPU 5401628c78e7STejun Heo * running as an unbound one and allowing it to be reattached later if the 5402628c78e7STejun Heo * cpu comes back online. 5403db7bccf4STejun Heo */ 5404db7bccf4STejun Heo 5405e8b3f8dbSLai Jiangshan static void unbind_workers(int cpu) 5406db7bccf4STejun Heo { 54074ce62e9eSTejun Heo struct worker_pool *pool; 5408db7bccf4STejun Heo struct worker *worker; 5409db7bccf4STejun Heo 5410f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 54111258fae7STejun Heo mutex_lock(&wq_pool_attach_mutex); 5412a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 5413e22bee78STejun Heo 5414f2d5a0eeSTejun Heo /* 541592f9c5c4SLai Jiangshan * We've blocked all attach/detach operations. Make all workers 541694cf58bbSTejun Heo * unbound and set DISASSOCIATED. Before this, all workers 541711b45b0bSLai Jiangshan * must be on the cpu. After this, they may become diasporas. 5418b4ac9384SLai Jiangshan * And the preemption disabled section in their sched callbacks 5419b4ac9384SLai Jiangshan * are guaranteed to see WORKER_UNBOUND since the code here 5420b4ac9384SLai Jiangshan * is on the same cpu. 5421f2d5a0eeSTejun Heo */ 5422da028469SLai Jiangshan for_each_pool_worker(worker, pool) 5423403c821dSTejun Heo worker->flags |= WORKER_UNBOUND; 5424db7bccf4STejun Heo 542524647570STejun Heo pool->flags |= POOL_DISASSOCIATED; 5426f2d5a0eeSTejun Heo 5427e22bee78STejun Heo /* 5428989442d7SLai Jiangshan * The handling of nr_running in sched callbacks are disabled 5429989442d7SLai Jiangshan * now. Zap nr_running. After this, nr_running stays zero and 5430989442d7SLai Jiangshan * need_more_worker() and keep_working() are always true as 5431989442d7SLai Jiangshan * long as the worklist is not empty. This pool now behaves as 5432989442d7SLai Jiangshan * an unbound (in terms of concurrency management) pool which 5433eb283428SLai Jiangshan * are served by workers tied to the pool. 
5434e22bee78STejun Heo */ 5435bc35f7efSLai Jiangshan pool->nr_running = 0; 5436eb283428SLai Jiangshan 5437eb283428SLai Jiangshan /* 5438eb283428SLai Jiangshan * With concurrency management just turned off, a busy 5439eb283428SLai Jiangshan * worker blocking could lead to lengthy stalls. Kick off 5440eb283428SLai Jiangshan * unbound chain execution of currently pending work items. 5441eb283428SLai Jiangshan */ 54420219a352STejun Heo kick_pool(pool); 5443989442d7SLai Jiangshan 5444a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 5445989442d7SLai Jiangshan 5446793777bcSValentin Schneider for_each_pool_worker(worker, pool) 5447793777bcSValentin Schneider unbind_worker(worker); 5448989442d7SLai Jiangshan 5449989442d7SLai Jiangshan mutex_unlock(&wq_pool_attach_mutex); 5450eb283428SLai Jiangshan } 5451db7bccf4STejun Heo } 5452db7bccf4STejun Heo 5453bd7c089eSTejun Heo /** 5454bd7c089eSTejun Heo * rebind_workers - rebind all workers of a pool to the associated CPU 5455bd7c089eSTejun Heo * @pool: pool of interest 5456bd7c089eSTejun Heo * 5457a9ab775bSTejun Heo * @pool->cpu is coming online. Rebind all workers to the CPU. 5458bd7c089eSTejun Heo */ 5459bd7c089eSTejun Heo static void rebind_workers(struct worker_pool *pool) 5460bd7c089eSTejun Heo { 5461a9ab775bSTejun Heo struct worker *worker; 5462bd7c089eSTejun Heo 54631258fae7STejun Heo lockdep_assert_held(&wq_pool_attach_mutex); 5464bd7c089eSTejun Heo 5465bd7c089eSTejun Heo /* 5466a9ab775bSTejun Heo * Restore CPU affinity of all workers. As all idle workers should 5467a9ab775bSTejun Heo * be on the run-queue of the associated CPU before any local 5468402dd89dSShailendra Verma * wake-ups for concurrency management happen, restore CPU affinity 5469a9ab775bSTejun Heo * of all workers first and then clear UNBOUND. As we're called 5470a9ab775bSTejun Heo * from CPU_ONLINE, the following shouldn't fail. 5471bd7c089eSTejun Heo */ 5472c63a2e52SValentin Schneider for_each_pool_worker(worker, pool) { 5473c63a2e52SValentin Schneider kthread_set_per_cpu(worker->task, pool->cpu); 5474c63a2e52SValentin Schneider WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 54759546b29eSTejun Heo pool_allowed_cpus(pool)) < 0); 5476c63a2e52SValentin Schneider } 5477a9ab775bSTejun Heo 5478a9b8a985SSebastian Andrzej Siewior raw_spin_lock_irq(&pool->lock); 5479f7c17d26SWanpeng Li 54803de5e884SLai Jiangshan pool->flags &= ~POOL_DISASSOCIATED; 5481a9ab775bSTejun Heo 5482da028469SLai Jiangshan for_each_pool_worker(worker, pool) { 5483a9ab775bSTejun Heo unsigned int worker_flags = worker->flags; 5484a9ab775bSTejun Heo 5485a9ab775bSTejun Heo /* 5486a9ab775bSTejun Heo * We want to clear UNBOUND but can't directly call 5487a9ab775bSTejun Heo * worker_clr_flags() or adjust nr_running. Atomically 5488a9ab775bSTejun Heo * replace UNBOUND with another NOT_RUNNING flag REBOUND. 5489a9ab775bSTejun Heo * @worker will clear REBOUND using worker_clr_flags() when 5490a9ab775bSTejun Heo * it initiates the next execution cycle thus restoring 5491a9ab775bSTejun Heo * concurrency management. Note that when or whether 5492a9ab775bSTejun Heo * @worker clears REBOUND doesn't affect correctness. 5493a9ab775bSTejun Heo * 5494c95491edSMark Rutland * WRITE_ONCE() is necessary because @worker->flags may be 5495a9ab775bSTejun Heo * tested without holding any lock in 54966d25be57SThomas Gleixner * wq_worker_running(). Without it, NOT_RUNNING test may 5497a9ab775bSTejun Heo * fail incorrectly leading to premature concurrency 5498a9ab775bSTejun Heo * management operations. 
5499bd7c089eSTejun Heo */ 5500a9ab775bSTejun Heo WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 5501a9ab775bSTejun Heo worker_flags |= WORKER_REBOUND; 5502a9ab775bSTejun Heo worker_flags &= ~WORKER_UNBOUND; 5503c95491edSMark Rutland WRITE_ONCE(worker->flags, worker_flags); 5504bd7c089eSTejun Heo } 5505a9ab775bSTejun Heo 5506a9b8a985SSebastian Andrzej Siewior raw_spin_unlock_irq(&pool->lock); 5507bd7c089eSTejun Heo } 5508bd7c089eSTejun Heo 55097dbc725eSTejun Heo /** 55107dbc725eSTejun Heo * restore_unbound_workers_cpumask - restore cpumask of unbound workers 55117dbc725eSTejun Heo * @pool: unbound pool of interest 55127dbc725eSTejun Heo * @cpu: the CPU which is coming up 55137dbc725eSTejun Heo * 55147dbc725eSTejun Heo * An unbound pool may end up with a cpumask which doesn't have any online 55157dbc725eSTejun Heo * CPUs. When a worker of such pool get scheduled, the scheduler resets 55167dbc725eSTejun Heo * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 55177dbc725eSTejun Heo * online CPU before, cpus_allowed of all its workers should be restored. 55187dbc725eSTejun Heo */ 55197dbc725eSTejun Heo static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 55207dbc725eSTejun Heo { 55217dbc725eSTejun Heo static cpumask_t cpumask; 55227dbc725eSTejun Heo struct worker *worker; 55237dbc725eSTejun Heo 55241258fae7STejun Heo lockdep_assert_held(&wq_pool_attach_mutex); 55257dbc725eSTejun Heo 55267dbc725eSTejun Heo /* is @cpu allowed for @pool? */ 55277dbc725eSTejun Heo if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 55287dbc725eSTejun Heo return; 55297dbc725eSTejun Heo 55307dbc725eSTejun Heo cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 55317dbc725eSTejun Heo 55327dbc725eSTejun Heo /* as we're called from CPU_ONLINE, the following shouldn't fail */ 5533da028469SLai Jiangshan for_each_pool_worker(worker, pool) 5534d945b5e9SPeter Zijlstra WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 55357dbc725eSTejun Heo } 55367dbc725eSTejun Heo 55377ee681b2SThomas Gleixner int workqueue_prepare_cpu(unsigned int cpu) 55381da177e4SLinus Torvalds { 55394ce62e9eSTejun Heo struct worker_pool *pool; 55401da177e4SLinus Torvalds 5541f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 55423ce63377STejun Heo if (pool->nr_workers) 55433ce63377STejun Heo continue; 5544051e1850SLai Jiangshan if (!create_worker(pool)) 55457ee681b2SThomas Gleixner return -ENOMEM; 55463af24433SOleg Nesterov } 55477ee681b2SThomas Gleixner return 0; 55487ee681b2SThomas Gleixner } 55491da177e4SLinus Torvalds 55507ee681b2SThomas Gleixner int workqueue_online_cpu(unsigned int cpu) 55517ee681b2SThomas Gleixner { 55527ee681b2SThomas Gleixner struct worker_pool *pool; 55537ee681b2SThomas Gleixner struct workqueue_struct *wq; 55547ee681b2SThomas Gleixner int pi; 55557ee681b2SThomas Gleixner 555668e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 55577dbc725eSTejun Heo 55587dbc725eSTejun Heo for_each_pool(pool, pi) { 55591258fae7STejun Heo mutex_lock(&wq_pool_attach_mutex); 556094cf58bbSTejun Heo 5561f05b558dSLai Jiangshan if (pool->cpu == cpu) 556294cf58bbSTejun Heo rebind_workers(pool); 5563f05b558dSLai Jiangshan else if (pool->cpu < 0) 55647dbc725eSTejun Heo restore_unbound_workers_cpumask(pool, cpu); 556594cf58bbSTejun Heo 55661258fae7STejun Heo mutex_unlock(&wq_pool_attach_mutex); 556794cf58bbSTejun Heo } 55687dbc725eSTejun Heo 5569fef59c9cSTejun Heo /* update pod affinity of unbound workqueues */ 55704cbfd3deSTejun Heo list_for_each_entry(wq, &workqueues, list) { 
557184193c07STejun Heo struct workqueue_attrs *attrs = wq->unbound_attrs; 557284193c07STejun Heo 557384193c07STejun Heo if (attrs) { 557484193c07STejun Heo const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 55754cbfd3deSTejun Heo int tcpu; 55764cbfd3deSTejun Heo 557784193c07STejun Heo for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5578fef59c9cSTejun Heo wq_update_pod(wq, tcpu, cpu, true); 55794cbfd3deSTejun Heo } 55804cbfd3deSTejun Heo } 55814c16bd32STejun Heo 558268e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 55837ee681b2SThomas Gleixner return 0; 558465758202STejun Heo } 558565758202STejun Heo 55867ee681b2SThomas Gleixner int workqueue_offline_cpu(unsigned int cpu) 558765758202STejun Heo { 55884c16bd32STejun Heo struct workqueue_struct *wq; 55898db25e78STejun Heo 55904c16bd32STejun Heo /* unbinding per-cpu workers should happen on the local CPU */ 5591e8b3f8dbSLai Jiangshan if (WARN_ON(cpu != smp_processor_id())) 5592e8b3f8dbSLai Jiangshan return -1; 5593e8b3f8dbSLai Jiangshan 5594e8b3f8dbSLai Jiangshan unbind_workers(cpu); 55954c16bd32STejun Heo 5596fef59c9cSTejun Heo /* update pod affinity of unbound workqueues */ 55974c16bd32STejun Heo mutex_lock(&wq_pool_mutex); 55984cbfd3deSTejun Heo list_for_each_entry(wq, &workqueues, list) { 559984193c07STejun Heo struct workqueue_attrs *attrs = wq->unbound_attrs; 560084193c07STejun Heo 560184193c07STejun Heo if (attrs) { 560284193c07STejun Heo const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 56034cbfd3deSTejun Heo int tcpu; 56044cbfd3deSTejun Heo 560584193c07STejun Heo for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5606fef59c9cSTejun Heo wq_update_pod(wq, tcpu, cpu, false); 56074cbfd3deSTejun Heo } 56084cbfd3deSTejun Heo } 56094c16bd32STejun Heo mutex_unlock(&wq_pool_mutex); 56104c16bd32STejun Heo 56117ee681b2SThomas Gleixner return 0; 561265758202STejun Heo } 561365758202STejun Heo 56142d3854a3SRusty Russell struct work_for_cpu { 5615ed48ece2STejun Heo struct work_struct work; 56162d3854a3SRusty Russell long (*fn)(void *); 56172d3854a3SRusty Russell void *arg; 56182d3854a3SRusty Russell long ret; 56192d3854a3SRusty Russell }; 56202d3854a3SRusty Russell 5621ed48ece2STejun Heo static void work_for_cpu_fn(struct work_struct *work) 56222d3854a3SRusty Russell { 5623ed48ece2STejun Heo struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 5624ed48ece2STejun Heo 56252d3854a3SRusty Russell wfc->ret = wfc->fn(wfc->arg); 56262d3854a3SRusty Russell } 56272d3854a3SRusty Russell 56282d3854a3SRusty Russell /** 5629be2355b7SFrederic Weisbecker * work_on_cpu_key - run a function in thread context on a particular cpu 56302d3854a3SRusty Russell * @cpu: the cpu to run on 56312d3854a3SRusty Russell * @fn: the function to run 56322d3854a3SRusty Russell * @arg: the function arg 5633be2355b7SFrederic Weisbecker * @key: The lock class key for lock debugging purposes 56342d3854a3SRusty Russell * 563531ad9081SRusty Russell * It is up to the caller to ensure that the cpu doesn't go offline. 56366b44003eSAndrew Morton * The caller must not hold any locks which would prevent @fn from completing. 5637d185af30SYacine Belkadi * 5638d185af30SYacine Belkadi * Return: The value @fn returns. 
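 *
 * Most callers go through the work_on_cpu() wrapper, which supplies @key.
 * A hypothetical sketch (function and variable names are illustrative):
 *
 *	static long read_node_sensor(void *arg)
 *	{
 *		return read_local_sensor_register();	// illustrative helper, not a real API
 *	}
 *
 *	long val = work_on_cpu(2, read_node_sensor, NULL);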
56392d3854a3SRusty Russell */ 5640be2355b7SFrederic Weisbecker long work_on_cpu_key(int cpu, long (*fn)(void *), 5641be2355b7SFrederic Weisbecker void *arg, struct lock_class_key *key) 56422d3854a3SRusty Russell { 5643ed48ece2STejun Heo struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 56442d3854a3SRusty Russell 5645be2355b7SFrederic Weisbecker INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key); 5646ed48ece2STejun Heo schedule_work_on(cpu, &wfc.work); 564712997d1aSBjorn Helgaas flush_work(&wfc.work); 5648440a1136SChuansheng Liu destroy_work_on_stack(&wfc.work); 56492d3854a3SRusty Russell return wfc.ret; 56502d3854a3SRusty Russell } 5651be2355b7SFrederic Weisbecker EXPORT_SYMBOL_GPL(work_on_cpu_key); 56520e8d6a93SThomas Gleixner 56530e8d6a93SThomas Gleixner /** 5654be2355b7SFrederic Weisbecker * work_on_cpu_safe_key - run a function in thread context on a particular cpu 56550e8d6a93SThomas Gleixner * @cpu: the cpu to run on 56560e8d6a93SThomas Gleixner * @fn: the function to run 56570e8d6a93SThomas Gleixner * @arg: the function argument 5658be2355b7SFrederic Weisbecker * @key: The lock class key for lock debugging purposes 56590e8d6a93SThomas Gleixner * 56600e8d6a93SThomas Gleixner * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold 56610e8d6a93SThomas Gleixner * any locks which would prevent @fn from completing. 56620e8d6a93SThomas Gleixner * 56630e8d6a93SThomas Gleixner * Return: The value @fn returns. 56640e8d6a93SThomas Gleixner */ 5665be2355b7SFrederic Weisbecker long work_on_cpu_safe_key(int cpu, long (*fn)(void *), 5666be2355b7SFrederic Weisbecker void *arg, struct lock_class_key *key) 56670e8d6a93SThomas Gleixner { 56680e8d6a93SThomas Gleixner long ret = -ENODEV; 56690e8d6a93SThomas Gleixner 5670ffd8bea8SSebastian Andrzej Siewior cpus_read_lock(); 56710e8d6a93SThomas Gleixner if (cpu_online(cpu)) 5672be2355b7SFrederic Weisbecker ret = work_on_cpu_key(cpu, fn, arg, key); 5673ffd8bea8SSebastian Andrzej Siewior cpus_read_unlock(); 56740e8d6a93SThomas Gleixner return ret; 56750e8d6a93SThomas Gleixner } 5676be2355b7SFrederic Weisbecker EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); 56772d3854a3SRusty Russell #endif /* CONFIG_SMP */ 56782d3854a3SRusty Russell 5679a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER 5680e7577c50SRusty Russell 5681a0a1a5fdSTejun Heo /** 5682a0a1a5fdSTejun Heo * freeze_workqueues_begin - begin freezing workqueues 5683a0a1a5fdSTejun Heo * 568458a69cb4STejun Heo * Start freezing workqueues. After this function returns, all freezable 5685f97a4a1aSLai Jiangshan * workqueues will queue new works to their inactive_works list instead of 5686706026c2STejun Heo * pool->worklist. 5687a0a1a5fdSTejun Heo * 5688a0a1a5fdSTejun Heo * CONTEXT: 5689a357fc03SLai Jiangshan * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
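 *
 * A rough sketch of how a freezer-side caller is expected to drive this API
 * (illustrative only - error handling and timeouts are omitted and this is
 * not a copy of the actual suspend code):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);		// let in-flight freezable work drain
 *	// ... system is frozen, do the suspend or hibernation work ...
 *	thaw_workqueues();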
5690a0a1a5fdSTejun Heo */ 5691a0a1a5fdSTejun Heo void freeze_workqueues_begin(void) 5692a0a1a5fdSTejun Heo { 569324b8a847STejun Heo struct workqueue_struct *wq; 5694a0a1a5fdSTejun Heo 569568e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 5696a0a1a5fdSTejun Heo 56976183c009STejun Heo WARN_ON_ONCE(workqueue_freezing); 5698a0a1a5fdSTejun Heo workqueue_freezing = true; 5699a0a1a5fdSTejun Heo 570024b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 5701a357fc03SLai Jiangshan mutex_lock(&wq->mutex); 570282e098f5STejun Heo wq_adjust_max_active(wq); 5703a357fc03SLai Jiangshan mutex_unlock(&wq->mutex); 5704a1056305STejun Heo } 57055bcab335STejun Heo 570668e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 5707a0a1a5fdSTejun Heo } 5708a0a1a5fdSTejun Heo 5709a0a1a5fdSTejun Heo /** 571058a69cb4STejun Heo * freeze_workqueues_busy - are freezable workqueues still busy? 5711a0a1a5fdSTejun Heo * 5712a0a1a5fdSTejun Heo * Check whether freezing is complete. This function must be called 5713a0a1a5fdSTejun Heo * between freeze_workqueues_begin() and thaw_workqueues(). 5714a0a1a5fdSTejun Heo * 5715a0a1a5fdSTejun Heo * CONTEXT: 571668e13a67SLai Jiangshan * Grabs and releases wq_pool_mutex. 5717a0a1a5fdSTejun Heo * 5718d185af30SYacine Belkadi * Return: 571958a69cb4STejun Heo * %true if some freezable workqueues are still busy. %false if freezing 572058a69cb4STejun Heo * is complete. 5721a0a1a5fdSTejun Heo */ 5722a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void) 5723a0a1a5fdSTejun Heo { 5724a0a1a5fdSTejun Heo bool busy = false; 572524b8a847STejun Heo struct workqueue_struct *wq; 572624b8a847STejun Heo struct pool_workqueue *pwq; 5727a0a1a5fdSTejun Heo 572868e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 5729a0a1a5fdSTejun Heo 57306183c009STejun Heo WARN_ON_ONCE(!workqueue_freezing); 5731a0a1a5fdSTejun Heo 573224b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 573324b8a847STejun Heo if (!(wq->flags & WQ_FREEZABLE)) 573424b8a847STejun Heo continue; 5735a0a1a5fdSTejun Heo /* 5736a0a1a5fdSTejun Heo * nr_active is monotonically decreasing. It's safe 5737a0a1a5fdSTejun Heo * to peek without lock. 5738a0a1a5fdSTejun Heo */ 573924acfb71SThomas Gleixner rcu_read_lock(); 574024b8a847STejun Heo for_each_pwq(pwq, wq) { 57416183c009STejun Heo WARN_ON_ONCE(pwq->nr_active < 0); 5742112202d9STejun Heo if (pwq->nr_active) { 5743a0a1a5fdSTejun Heo busy = true; 574424acfb71SThomas Gleixner rcu_read_unlock(); 5745a0a1a5fdSTejun Heo goto out_unlock; 5746a0a1a5fdSTejun Heo } 5747a0a1a5fdSTejun Heo } 574824acfb71SThomas Gleixner rcu_read_unlock(); 5749a0a1a5fdSTejun Heo } 5750a0a1a5fdSTejun Heo out_unlock: 575168e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 5752a0a1a5fdSTejun Heo return busy; 5753a0a1a5fdSTejun Heo } 5754a0a1a5fdSTejun Heo 5755a0a1a5fdSTejun Heo /** 5756a0a1a5fdSTejun Heo * thaw_workqueues - thaw workqueues 5757a0a1a5fdSTejun Heo * 5758a0a1a5fdSTejun Heo * Thaw workqueues. Normal queueing is restored and all collected 5759706026c2STejun Heo * frozen works are transferred to their respective pool worklists. 5760a0a1a5fdSTejun Heo * 5761a0a1a5fdSTejun Heo * CONTEXT: 5762a357fc03SLai Jiangshan * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
5763a0a1a5fdSTejun Heo */ 5764a0a1a5fdSTejun Heo void thaw_workqueues(void) 5765a0a1a5fdSTejun Heo { 576624b8a847STejun Heo struct workqueue_struct *wq; 5767a0a1a5fdSTejun Heo 576868e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 5769a0a1a5fdSTejun Heo 5770a0a1a5fdSTejun Heo if (!workqueue_freezing) 5771a0a1a5fdSTejun Heo goto out_unlock; 5772a0a1a5fdSTejun Heo 577374b414eaSLai Jiangshan workqueue_freezing = false; 577424b8a847STejun Heo 577524b8a847STejun Heo /* restore max_active and repopulate worklist */ 577624b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 5777a357fc03SLai Jiangshan mutex_lock(&wq->mutex); 577882e098f5STejun Heo wq_adjust_max_active(wq); 5779a357fc03SLai Jiangshan mutex_unlock(&wq->mutex); 578024b8a847STejun Heo } 578124b8a847STejun Heo 5782a0a1a5fdSTejun Heo out_unlock: 578368e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 5784a0a1a5fdSTejun Heo } 5785a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */ 5786a0a1a5fdSTejun Heo 578799c621efSLai Jiangshan static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) 5788042f7df1SLai Jiangshan { 5789042f7df1SLai Jiangshan LIST_HEAD(ctxs); 5790042f7df1SLai Jiangshan int ret = 0; 5791042f7df1SLai Jiangshan struct workqueue_struct *wq; 5792042f7df1SLai Jiangshan struct apply_wqattrs_ctx *ctx, *n; 5793042f7df1SLai Jiangshan 5794042f7df1SLai Jiangshan lockdep_assert_held(&wq_pool_mutex); 5795042f7df1SLai Jiangshan 5796042f7df1SLai Jiangshan list_for_each_entry(wq, &workqueues, list) { 5797042f7df1SLai Jiangshan if (!(wq->flags & WQ_UNBOUND)) 5798042f7df1SLai Jiangshan continue; 5799042f7df1SLai Jiangshan /* creating multiple pwqs breaks ordering guarantee */ 58005ad73e10STejun Heo if (wq->flags & __WQ_ORDERED) 5801042f7df1SLai Jiangshan continue; 5802042f7df1SLai Jiangshan 580399c621efSLai Jiangshan ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); 580484193c07STejun Heo if (IS_ERR(ctx)) { 580584193c07STejun Heo ret = PTR_ERR(ctx); 5806042f7df1SLai Jiangshan break; 5807042f7df1SLai Jiangshan } 5808042f7df1SLai Jiangshan 5809042f7df1SLai Jiangshan list_add_tail(&ctx->list, &ctxs); 5810042f7df1SLai Jiangshan } 5811042f7df1SLai Jiangshan 5812042f7df1SLai Jiangshan list_for_each_entry_safe(ctx, n, &ctxs, list) { 5813042f7df1SLai Jiangshan if (!ret) 5814042f7df1SLai Jiangshan apply_wqattrs_commit(ctx); 5815042f7df1SLai Jiangshan apply_wqattrs_cleanup(ctx); 5816042f7df1SLai Jiangshan } 5817042f7df1SLai Jiangshan 581899c621efSLai Jiangshan if (!ret) { 581999c621efSLai Jiangshan mutex_lock(&wq_pool_attach_mutex); 582099c621efSLai Jiangshan cpumask_copy(wq_unbound_cpumask, unbound_cpumask); 582199c621efSLai Jiangshan mutex_unlock(&wq_pool_attach_mutex); 582299c621efSLai Jiangshan } 5823042f7df1SLai Jiangshan return ret; 5824042f7df1SLai Jiangshan } 5825042f7df1SLai Jiangshan 5826042f7df1SLai Jiangshan /** 5827042f7df1SLai Jiangshan * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask 5828042f7df1SLai Jiangshan * @cpumask: the cpumask to set 5829042f7df1SLai Jiangshan * 5830042f7df1SLai Jiangshan * The low-level workqueues cpumask is a global cpumask that limits 5831042f7df1SLai Jiangshan * the affinity of all unbound workqueues. This function checks the @cpumask 5832042f7df1SLai Jiangshan * and applies it to all unbound workqueues and updates all of their pwqs. 5833042f7df1SLai Jiangshan * 583467dc8325SCai Huoqing * Return: 0 - Success 5835042f7df1SLai Jiangshan * -EINVAL - Invalid @cpumask 5836042f7df1SLai Jiangshan * -ENOMEM - Failed to allocate memory for attrs or pwqs.
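 *
 * A minimal caller sketch, mirroring what the sysfs cpumask store path in
 * this file does (the "f" mask, i.e. CPUs 0-3, is just an example value):
 *
 *	cpumask_var_t mask;
 *	int ret;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	ret = cpumask_parse("f", mask);
 *	if (!ret)
 *		ret = workqueue_set_unbound_cpumask(mask);
 *	free_cpumask_var(mask);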
5837042f7df1SLai Jiangshan */ 5838042f7df1SLai Jiangshan int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) 5839042f7df1SLai Jiangshan { 5840042f7df1SLai Jiangshan int ret = -EINVAL; 5841042f7df1SLai Jiangshan 5842c98a9805STal Shorer /* 5843c98a9805STal Shorer * Not excluding isolated cpus on purpose. 5844c98a9805STal Shorer * If the user wishes to include them, we allow that. 5845c98a9805STal Shorer */ 5846042f7df1SLai Jiangshan cpumask_and(cpumask, cpumask, cpu_possible_mask); 5847042f7df1SLai Jiangshan if (!cpumask_empty(cpumask)) { 5848a0111cf6SLai Jiangshan apply_wqattrs_lock(); 5849d25302e4SMenglong Dong if (cpumask_equal(cpumask, wq_unbound_cpumask)) { 5850d25302e4SMenglong Dong ret = 0; 5851d25302e4SMenglong Dong goto out_unlock; 5852d25302e4SMenglong Dong } 5853d25302e4SMenglong Dong 585499c621efSLai Jiangshan ret = workqueue_apply_unbound_cpumask(cpumask); 5855042f7df1SLai Jiangshan 5856d25302e4SMenglong Dong out_unlock: 5857a0111cf6SLai Jiangshan apply_wqattrs_unlock(); 5858042f7df1SLai Jiangshan } 5859042f7df1SLai Jiangshan 5860042f7df1SLai Jiangshan return ret; 5861042f7df1SLai Jiangshan } 5862042f7df1SLai Jiangshan 586363c5484eSTejun Heo static int parse_affn_scope(const char *val) 586463c5484eSTejun Heo { 586563c5484eSTejun Heo int i; 586663c5484eSTejun Heo 586763c5484eSTejun Heo for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) { 586863c5484eSTejun Heo if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i]))) 586963c5484eSTejun Heo return i; 587063c5484eSTejun Heo } 587163c5484eSTejun Heo return -EINVAL; 587263c5484eSTejun Heo } 587363c5484eSTejun Heo 587463c5484eSTejun Heo static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp) 587563c5484eSTejun Heo { 5876523a301eSTejun Heo struct workqueue_struct *wq; 5877523a301eSTejun Heo int affn, cpu; 587863c5484eSTejun Heo 587963c5484eSTejun Heo affn = parse_affn_scope(val); 588063c5484eSTejun Heo if (affn < 0) 588163c5484eSTejun Heo return affn; 5882523a301eSTejun Heo if (affn == WQ_AFFN_DFL) 5883523a301eSTejun Heo return -EINVAL; 5884523a301eSTejun Heo 5885523a301eSTejun Heo cpus_read_lock(); 5886523a301eSTejun Heo mutex_lock(&wq_pool_mutex); 588763c5484eSTejun Heo 588863c5484eSTejun Heo wq_affn_dfl = affn; 5889523a301eSTejun Heo 5890523a301eSTejun Heo list_for_each_entry(wq, &workqueues, list) { 5891523a301eSTejun Heo for_each_online_cpu(cpu) { 5892523a301eSTejun Heo wq_update_pod(wq, cpu, cpu, true); 5893523a301eSTejun Heo } 5894523a301eSTejun Heo } 5895523a301eSTejun Heo 5896523a301eSTejun Heo mutex_unlock(&wq_pool_mutex); 5897523a301eSTejun Heo cpus_read_unlock(); 5898523a301eSTejun Heo 589963c5484eSTejun Heo return 0; 590063c5484eSTejun Heo } 590163c5484eSTejun Heo 590263c5484eSTejun Heo static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp) 590363c5484eSTejun Heo { 590463c5484eSTejun Heo return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]); 590563c5484eSTejun Heo } 590663c5484eSTejun Heo 590763c5484eSTejun Heo static const struct kernel_param_ops wq_affn_dfl_ops = { 590863c5484eSTejun Heo .set = wq_affn_dfl_set, 590963c5484eSTejun Heo .get = wq_affn_dfl_get, 591063c5484eSTejun Heo }; 591163c5484eSTejun Heo 591263c5484eSTejun Heo module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644); 591363c5484eSTejun Heo 59146ba94429SFrederic Weisbecker #ifdef CONFIG_SYSFS 59156ba94429SFrederic Weisbecker /* 59166ba94429SFrederic Weisbecker * Workqueues with WQ_SYSFS flag set is visible to userland via 59176ba94429SFrederic Weisbecker * 
/sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 59186ba94429SFrederic Weisbecker * following attributes. 59196ba94429SFrederic Weisbecker * 59206ba94429SFrederic Weisbecker * per_cpu RO bool : whether the workqueue is per-cpu or unbound 59216ba94429SFrederic Weisbecker * max_active RW int : maximum number of in-flight work items 59226ba94429SFrederic Weisbecker * 59236ba94429SFrederic Weisbecker * Unbound workqueues have the following extra attributes. 59246ba94429SFrederic Weisbecker * 59256ba94429SFrederic Weisbecker * nice RW int : nice value of the workers 59266ba94429SFrederic Weisbecker * cpumask RW mask : bitmask of allowed CPUs for the workers 592763c5484eSTejun Heo * affinity_scope RW str : worker CPU affinity scope (cache, numa, none) 59288639ecebSTejun Heo * affinity_strict RW bool : worker CPU affinity is strict 59296ba94429SFrederic Weisbecker */ 59306ba94429SFrederic Weisbecker struct wq_device { 59316ba94429SFrederic Weisbecker struct workqueue_struct *wq; 59326ba94429SFrederic Weisbecker struct device dev; 59336ba94429SFrederic Weisbecker }; 59346ba94429SFrederic Weisbecker 59356ba94429SFrederic Weisbecker static struct workqueue_struct *dev_to_wq(struct device *dev) 59366ba94429SFrederic Weisbecker { 59376ba94429SFrederic Weisbecker struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 59386ba94429SFrederic Weisbecker 59396ba94429SFrederic Weisbecker return wq_dev->wq; 59406ba94429SFrederic Weisbecker } 59416ba94429SFrederic Weisbecker 59426ba94429SFrederic Weisbecker static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 59436ba94429SFrederic Weisbecker char *buf) 59446ba94429SFrederic Weisbecker { 59456ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 59466ba94429SFrederic Weisbecker 59476ba94429SFrederic Weisbecker return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 59486ba94429SFrederic Weisbecker } 59496ba94429SFrederic Weisbecker static DEVICE_ATTR_RO(per_cpu); 59506ba94429SFrederic Weisbecker 59516ba94429SFrederic Weisbecker static ssize_t max_active_show(struct device *dev, 59526ba94429SFrederic Weisbecker struct device_attribute *attr, char *buf) 59536ba94429SFrederic Weisbecker { 59546ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 59556ba94429SFrederic Weisbecker 59566ba94429SFrederic Weisbecker return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 59576ba94429SFrederic Weisbecker } 59586ba94429SFrederic Weisbecker 59596ba94429SFrederic Weisbecker static ssize_t max_active_store(struct device *dev, 59606ba94429SFrederic Weisbecker struct device_attribute *attr, const char *buf, 59616ba94429SFrederic Weisbecker size_t count) 59626ba94429SFrederic Weisbecker { 59636ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 59646ba94429SFrederic Weisbecker int val; 59656ba94429SFrederic Weisbecker 59666ba94429SFrederic Weisbecker if (sscanf(buf, "%d", &val) != 1 || val <= 0) 59676ba94429SFrederic Weisbecker return -EINVAL; 59686ba94429SFrederic Weisbecker 59696ba94429SFrederic Weisbecker workqueue_set_max_active(wq, val); 59706ba94429SFrederic Weisbecker return count; 59716ba94429SFrederic Weisbecker } 59726ba94429SFrederic Weisbecker static DEVICE_ATTR_RW(max_active); 59736ba94429SFrederic Weisbecker 59746ba94429SFrederic Weisbecker static struct attribute *wq_sysfs_attrs[] = { 59756ba94429SFrederic Weisbecker &dev_attr_per_cpu.attr, 59766ba94429SFrederic Weisbecker &dev_attr_max_active.attr, 
59776ba94429SFrederic Weisbecker NULL, 59786ba94429SFrederic Weisbecker }; 59796ba94429SFrederic Weisbecker ATTRIBUTE_GROUPS(wq_sysfs); 59806ba94429SFrederic Weisbecker 59816ba94429SFrederic Weisbecker static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 59826ba94429SFrederic Weisbecker char *buf) 59836ba94429SFrederic Weisbecker { 59846ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 59856ba94429SFrederic Weisbecker int written; 59866ba94429SFrederic Weisbecker 59876ba94429SFrederic Weisbecker mutex_lock(&wq->mutex); 59886ba94429SFrederic Weisbecker written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 59896ba94429SFrederic Weisbecker mutex_unlock(&wq->mutex); 59906ba94429SFrederic Weisbecker 59916ba94429SFrederic Weisbecker return written; 59926ba94429SFrederic Weisbecker } 59936ba94429SFrederic Weisbecker 59946ba94429SFrederic Weisbecker /* prepare workqueue_attrs for sysfs store operations */ 59956ba94429SFrederic Weisbecker static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 59966ba94429SFrederic Weisbecker { 59976ba94429SFrederic Weisbecker struct workqueue_attrs *attrs; 59986ba94429SFrederic Weisbecker 5999899a94feSLai Jiangshan lockdep_assert_held(&wq_pool_mutex); 6000899a94feSLai Jiangshan 6001be69d00dSThomas Gleixner attrs = alloc_workqueue_attrs(); 60026ba94429SFrederic Weisbecker if (!attrs) 60036ba94429SFrederic Weisbecker return NULL; 60046ba94429SFrederic Weisbecker 60056ba94429SFrederic Weisbecker copy_workqueue_attrs(attrs, wq->unbound_attrs); 60066ba94429SFrederic Weisbecker return attrs; 60076ba94429SFrederic Weisbecker } 60086ba94429SFrederic Weisbecker 60096ba94429SFrederic Weisbecker static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 60106ba94429SFrederic Weisbecker const char *buf, size_t count) 60116ba94429SFrederic Weisbecker { 60126ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 60136ba94429SFrederic Weisbecker struct workqueue_attrs *attrs; 6014d4d3e257SLai Jiangshan int ret = -ENOMEM; 6015d4d3e257SLai Jiangshan 6016d4d3e257SLai Jiangshan apply_wqattrs_lock(); 60176ba94429SFrederic Weisbecker 60186ba94429SFrederic Weisbecker attrs = wq_sysfs_prep_attrs(wq); 60196ba94429SFrederic Weisbecker if (!attrs) 6020d4d3e257SLai Jiangshan goto out_unlock; 60216ba94429SFrederic Weisbecker 60226ba94429SFrederic Weisbecker if (sscanf(buf, "%d", &attrs->nice) == 1 && 60236ba94429SFrederic Weisbecker attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 6024d4d3e257SLai Jiangshan ret = apply_workqueue_attrs_locked(wq, attrs); 60256ba94429SFrederic Weisbecker else 60266ba94429SFrederic Weisbecker ret = -EINVAL; 60276ba94429SFrederic Weisbecker 6028d4d3e257SLai Jiangshan out_unlock: 6029d4d3e257SLai Jiangshan apply_wqattrs_unlock(); 60306ba94429SFrederic Weisbecker free_workqueue_attrs(attrs); 60316ba94429SFrederic Weisbecker return ret ?: count; 60326ba94429SFrederic Weisbecker } 60336ba94429SFrederic Weisbecker 60346ba94429SFrederic Weisbecker static ssize_t wq_cpumask_show(struct device *dev, 60356ba94429SFrederic Weisbecker struct device_attribute *attr, char *buf) 60366ba94429SFrederic Weisbecker { 60376ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 60386ba94429SFrederic Weisbecker int written; 60396ba94429SFrederic Weisbecker 60406ba94429SFrederic Weisbecker mutex_lock(&wq->mutex); 60416ba94429SFrederic Weisbecker written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 60426ba94429SFrederic Weisbecker 
cpumask_pr_args(wq->unbound_attrs->cpumask)); 60436ba94429SFrederic Weisbecker mutex_unlock(&wq->mutex); 60446ba94429SFrederic Weisbecker return written; 60456ba94429SFrederic Weisbecker } 60466ba94429SFrederic Weisbecker 60476ba94429SFrederic Weisbecker static ssize_t wq_cpumask_store(struct device *dev, 60486ba94429SFrederic Weisbecker struct device_attribute *attr, 60496ba94429SFrederic Weisbecker const char *buf, size_t count) 60506ba94429SFrederic Weisbecker { 60516ba94429SFrederic Weisbecker struct workqueue_struct *wq = dev_to_wq(dev); 60526ba94429SFrederic Weisbecker struct workqueue_attrs *attrs; 6053d4d3e257SLai Jiangshan int ret = -ENOMEM; 6054d4d3e257SLai Jiangshan 6055d4d3e257SLai Jiangshan apply_wqattrs_lock(); 60566ba94429SFrederic Weisbecker 60576ba94429SFrederic Weisbecker attrs = wq_sysfs_prep_attrs(wq); 60586ba94429SFrederic Weisbecker if (!attrs) 6059d4d3e257SLai Jiangshan goto out_unlock; 60606ba94429SFrederic Weisbecker 60616ba94429SFrederic Weisbecker ret = cpumask_parse(buf, attrs->cpumask); 60626ba94429SFrederic Weisbecker if (!ret) 6063d4d3e257SLai Jiangshan ret = apply_workqueue_attrs_locked(wq, attrs); 60646ba94429SFrederic Weisbecker 6065d4d3e257SLai Jiangshan out_unlock: 6066d4d3e257SLai Jiangshan apply_wqattrs_unlock(); 60676ba94429SFrederic Weisbecker free_workqueue_attrs(attrs); 60686ba94429SFrederic Weisbecker return ret ?: count; 60696ba94429SFrederic Weisbecker } 60706ba94429SFrederic Weisbecker 607163c5484eSTejun Heo static ssize_t wq_affn_scope_show(struct device *dev, 607263c5484eSTejun Heo struct device_attribute *attr, char *buf) 607363c5484eSTejun Heo { 607463c5484eSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 607563c5484eSTejun Heo int written; 607663c5484eSTejun Heo 607763c5484eSTejun Heo mutex_lock(&wq->mutex); 6078523a301eSTejun Heo if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL) 6079523a301eSTejun Heo written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n", 6080523a301eSTejun Heo wq_affn_names[WQ_AFFN_DFL], 6081523a301eSTejun Heo wq_affn_names[wq_affn_dfl]); 6082523a301eSTejun Heo else 608363c5484eSTejun Heo written = scnprintf(buf, PAGE_SIZE, "%s\n", 608463c5484eSTejun Heo wq_affn_names[wq->unbound_attrs->affn_scope]); 608563c5484eSTejun Heo mutex_unlock(&wq->mutex); 608663c5484eSTejun Heo 608763c5484eSTejun Heo return written; 608863c5484eSTejun Heo } 608963c5484eSTejun Heo 609063c5484eSTejun Heo static ssize_t wq_affn_scope_store(struct device *dev, 609163c5484eSTejun Heo struct device_attribute *attr, 609263c5484eSTejun Heo const char *buf, size_t count) 609363c5484eSTejun Heo { 609463c5484eSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 609563c5484eSTejun Heo struct workqueue_attrs *attrs; 609663c5484eSTejun Heo int affn, ret = -ENOMEM; 609763c5484eSTejun Heo 609863c5484eSTejun Heo affn = parse_affn_scope(buf); 609963c5484eSTejun Heo if (affn < 0) 610063c5484eSTejun Heo return affn; 610163c5484eSTejun Heo 610263c5484eSTejun Heo apply_wqattrs_lock(); 610363c5484eSTejun Heo attrs = wq_sysfs_prep_attrs(wq); 610463c5484eSTejun Heo if (attrs) { 610563c5484eSTejun Heo attrs->affn_scope = affn; 610663c5484eSTejun Heo ret = apply_workqueue_attrs_locked(wq, attrs); 610763c5484eSTejun Heo } 610863c5484eSTejun Heo apply_wqattrs_unlock(); 610963c5484eSTejun Heo free_workqueue_attrs(attrs); 611063c5484eSTejun Heo return ret ?: count; 611163c5484eSTejun Heo } 611263c5484eSTejun Heo 61138639ecebSTejun Heo static ssize_t wq_affinity_strict_show(struct device *dev, 61148639ecebSTejun Heo struct device_attribute *attr, char *buf) 
61158639ecebSTejun Heo { 61168639ecebSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 61178639ecebSTejun Heo 61188639ecebSTejun Heo return scnprintf(buf, PAGE_SIZE, "%d\n", 61198639ecebSTejun Heo wq->unbound_attrs->affn_strict); 61208639ecebSTejun Heo } 61218639ecebSTejun Heo 61228639ecebSTejun Heo static ssize_t wq_affinity_strict_store(struct device *dev, 61238639ecebSTejun Heo struct device_attribute *attr, 61248639ecebSTejun Heo const char *buf, size_t count) 61258639ecebSTejun Heo { 61268639ecebSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 61278639ecebSTejun Heo struct workqueue_attrs *attrs; 61288639ecebSTejun Heo int v, ret = -ENOMEM; 61298639ecebSTejun Heo 61308639ecebSTejun Heo if (sscanf(buf, "%d", &v) != 1) 61318639ecebSTejun Heo return -EINVAL; 61328639ecebSTejun Heo 61338639ecebSTejun Heo apply_wqattrs_lock(); 61348639ecebSTejun Heo attrs = wq_sysfs_prep_attrs(wq); 61358639ecebSTejun Heo if (attrs) { 61368639ecebSTejun Heo attrs->affn_strict = (bool)v; 61378639ecebSTejun Heo ret = apply_workqueue_attrs_locked(wq, attrs); 61388639ecebSTejun Heo } 61398639ecebSTejun Heo apply_wqattrs_unlock(); 61408639ecebSTejun Heo free_workqueue_attrs(attrs); 61418639ecebSTejun Heo return ret ?: count; 61428639ecebSTejun Heo } 61438639ecebSTejun Heo 61446ba94429SFrederic Weisbecker static struct device_attribute wq_sysfs_unbound_attrs[] = { 61456ba94429SFrederic Weisbecker __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 61466ba94429SFrederic Weisbecker __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 614763c5484eSTejun Heo __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store), 61488639ecebSTejun Heo __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store), 61496ba94429SFrederic Weisbecker __ATTR_NULL, 61506ba94429SFrederic Weisbecker }; 61516ba94429SFrederic Weisbecker 61526ba94429SFrederic Weisbecker static struct bus_type wq_subsys = { 61536ba94429SFrederic Weisbecker .name = "workqueue", 61546ba94429SFrederic Weisbecker .dev_groups = wq_sysfs_groups, 61556ba94429SFrederic Weisbecker }; 61566ba94429SFrederic Weisbecker 6157b05a7928SFrederic Weisbecker static ssize_t wq_unbound_cpumask_show(struct device *dev, 6158b05a7928SFrederic Weisbecker struct device_attribute *attr, char *buf) 6159b05a7928SFrederic Weisbecker { 6160b05a7928SFrederic Weisbecker int written; 6161b05a7928SFrederic Weisbecker 6162042f7df1SLai Jiangshan mutex_lock(&wq_pool_mutex); 6163b05a7928SFrederic Weisbecker written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6164b05a7928SFrederic Weisbecker cpumask_pr_args(wq_unbound_cpumask)); 6165042f7df1SLai Jiangshan mutex_unlock(&wq_pool_mutex); 6166b05a7928SFrederic Weisbecker 6167b05a7928SFrederic Weisbecker return written; 6168b05a7928SFrederic Weisbecker } 6169b05a7928SFrederic Weisbecker 6170042f7df1SLai Jiangshan static ssize_t wq_unbound_cpumask_store(struct device *dev, 6171042f7df1SLai Jiangshan struct device_attribute *attr, const char *buf, size_t count) 6172042f7df1SLai Jiangshan { 6173042f7df1SLai Jiangshan cpumask_var_t cpumask; 6174042f7df1SLai Jiangshan int ret; 6175042f7df1SLai Jiangshan 6176042f7df1SLai Jiangshan if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 6177042f7df1SLai Jiangshan return -ENOMEM; 6178042f7df1SLai Jiangshan 6179042f7df1SLai Jiangshan ret = cpumask_parse(buf, cpumask); 6180042f7df1SLai Jiangshan if (!ret) 6181042f7df1SLai Jiangshan ret = workqueue_set_unbound_cpumask(cpumask); 6182042f7df1SLai Jiangshan 6183042f7df1SLai Jiangshan free_cpumask_var(cpumask); 6184042f7df1SLai 
Jiangshan return ret ? ret : count; 6185042f7df1SLai Jiangshan } 6186042f7df1SLai Jiangshan 6187b05a7928SFrederic Weisbecker static struct device_attribute wq_sysfs_cpumask_attr = 6188042f7df1SLai Jiangshan __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 6189042f7df1SLai Jiangshan wq_unbound_cpumask_store); 6190b05a7928SFrederic Weisbecker 61916ba94429SFrederic Weisbecker static int __init wq_sysfs_init(void) 61926ba94429SFrederic Weisbecker { 6193686f6697SGreg Kroah-Hartman struct device *dev_root; 6194b05a7928SFrederic Weisbecker int err; 6195b05a7928SFrederic Weisbecker 6196b05a7928SFrederic Weisbecker err = subsys_virtual_register(&wq_subsys, NULL); 6197b05a7928SFrederic Weisbecker if (err) 6198b05a7928SFrederic Weisbecker return err; 6199b05a7928SFrederic Weisbecker 6200686f6697SGreg Kroah-Hartman dev_root = bus_get_dev_root(&wq_subsys); 6201686f6697SGreg Kroah-Hartman if (dev_root) { 6202686f6697SGreg Kroah-Hartman err = device_create_file(dev_root, &wq_sysfs_cpumask_attr); 6203686f6697SGreg Kroah-Hartman put_device(dev_root); 6204686f6697SGreg Kroah-Hartman } 6205686f6697SGreg Kroah-Hartman return err; 62066ba94429SFrederic Weisbecker } 62076ba94429SFrederic Weisbecker core_initcall(wq_sysfs_init); 62086ba94429SFrederic Weisbecker 62096ba94429SFrederic Weisbecker static void wq_device_release(struct device *dev) 62106ba94429SFrederic Weisbecker { 62116ba94429SFrederic Weisbecker struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 62126ba94429SFrederic Weisbecker 62136ba94429SFrederic Weisbecker kfree(wq_dev); 62146ba94429SFrederic Weisbecker } 62156ba94429SFrederic Weisbecker 62166ba94429SFrederic Weisbecker /** 62176ba94429SFrederic Weisbecker * workqueue_sysfs_register - make a workqueue visible in sysfs 62186ba94429SFrederic Weisbecker * @wq: the workqueue to register 62196ba94429SFrederic Weisbecker * 62206ba94429SFrederic Weisbecker * Expose @wq in sysfs under /sys/bus/workqueue/devices. 62216ba94429SFrederic Weisbecker * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 62226ba94429SFrederic Weisbecker * which is the preferred method. 62236ba94429SFrederic Weisbecker * 62246ba94429SFrederic Weisbecker * Workqueue user should use this function directly iff it wants to apply 62256ba94429SFrederic Weisbecker * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 62266ba94429SFrederic Weisbecker * apply_workqueue_attrs() may race against userland updating the 62276ba94429SFrederic Weisbecker * attributes. 62286ba94429SFrederic Weisbecker * 62296ba94429SFrederic Weisbecker * Return: 0 on success, -errno on failure. 62306ba94429SFrederic Weisbecker */ 62316ba94429SFrederic Weisbecker int workqueue_sysfs_register(struct workqueue_struct *wq) 62326ba94429SFrederic Weisbecker { 62336ba94429SFrederic Weisbecker struct wq_device *wq_dev; 62346ba94429SFrederic Weisbecker int ret; 62356ba94429SFrederic Weisbecker 62366ba94429SFrederic Weisbecker /* 6237402dd89dSShailendra Verma * Adjusting max_active or creating new pwqs by applying 62386ba94429SFrederic Weisbecker * attributes breaks ordering guarantee. Disallow exposing ordered 62396ba94429SFrederic Weisbecker * workqueues. 
62406ba94429SFrederic Weisbecker */ 62410a94efb5STejun Heo if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 62426ba94429SFrederic Weisbecker return -EINVAL; 62436ba94429SFrederic Weisbecker 62446ba94429SFrederic Weisbecker wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 62456ba94429SFrederic Weisbecker if (!wq_dev) 62466ba94429SFrederic Weisbecker return -ENOMEM; 62476ba94429SFrederic Weisbecker 62486ba94429SFrederic Weisbecker wq_dev->wq = wq; 62496ba94429SFrederic Weisbecker wq_dev->dev.bus = &wq_subsys; 62506ba94429SFrederic Weisbecker wq_dev->dev.release = wq_device_release; 625123217b44SLars-Peter Clausen dev_set_name(&wq_dev->dev, "%s", wq->name); 62526ba94429SFrederic Weisbecker 62536ba94429SFrederic Weisbecker /* 62546ba94429SFrederic Weisbecker * unbound_attrs are created separately. Suppress uevent until 62556ba94429SFrederic Weisbecker * everything is ready. 62566ba94429SFrederic Weisbecker */ 62576ba94429SFrederic Weisbecker dev_set_uevent_suppress(&wq_dev->dev, true); 62586ba94429SFrederic Weisbecker 62596ba94429SFrederic Weisbecker ret = device_register(&wq_dev->dev); 62606ba94429SFrederic Weisbecker if (ret) { 6261537f4146SArvind Yadav put_device(&wq_dev->dev); 62626ba94429SFrederic Weisbecker wq->wq_dev = NULL; 62636ba94429SFrederic Weisbecker return ret; 62646ba94429SFrederic Weisbecker } 62656ba94429SFrederic Weisbecker 62666ba94429SFrederic Weisbecker if (wq->flags & WQ_UNBOUND) { 62676ba94429SFrederic Weisbecker struct device_attribute *attr; 62686ba94429SFrederic Weisbecker 62696ba94429SFrederic Weisbecker for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 62706ba94429SFrederic Weisbecker ret = device_create_file(&wq_dev->dev, attr); 62716ba94429SFrederic Weisbecker if (ret) { 62726ba94429SFrederic Weisbecker device_unregister(&wq_dev->dev); 62736ba94429SFrederic Weisbecker wq->wq_dev = NULL; 62746ba94429SFrederic Weisbecker return ret; 62756ba94429SFrederic Weisbecker } 62766ba94429SFrederic Weisbecker } 62776ba94429SFrederic Weisbecker } 62786ba94429SFrederic Weisbecker 62796ba94429SFrederic Weisbecker dev_set_uevent_suppress(&wq_dev->dev, false); 62806ba94429SFrederic Weisbecker kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 62816ba94429SFrederic Weisbecker return 0; 62826ba94429SFrederic Weisbecker } 62836ba94429SFrederic Weisbecker 62846ba94429SFrederic Weisbecker /** 62856ba94429SFrederic Weisbecker * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 62866ba94429SFrederic Weisbecker * @wq: the workqueue to unregister 62876ba94429SFrederic Weisbecker * 62886ba94429SFrederic Weisbecker * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 62896ba94429SFrederic Weisbecker */ 62906ba94429SFrederic Weisbecker static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 62916ba94429SFrederic Weisbecker { 62926ba94429SFrederic Weisbecker struct wq_device *wq_dev = wq->wq_dev; 62936ba94429SFrederic Weisbecker 62946ba94429SFrederic Weisbecker if (!wq->wq_dev) 62956ba94429SFrederic Weisbecker return; 62966ba94429SFrederic Weisbecker 62976ba94429SFrederic Weisbecker wq->wq_dev = NULL; 62986ba94429SFrederic Weisbecker device_unregister(&wq_dev->dev); 62996ba94429SFrederic Weisbecker } 63006ba94429SFrederic Weisbecker #else /* CONFIG_SYSFS */ 63016ba94429SFrederic Weisbecker static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 63026ba94429SFrederic Weisbecker #endif /* CONFIG_SYSFS */ 63036ba94429SFrederic Weisbecker 630482607adcSTejun Heo /* 630582607adcSTejun Heo * Workqueue watchdog. 
630682607adcSTejun Heo * 630782607adcSTejun Heo * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal 630882607adcSTejun Heo * flush dependency, a concurrency managed work item which stays RUNNING 630982607adcSTejun Heo * indefinitely. Workqueue stalls can be very difficult to debug as the 631082607adcSTejun Heo * usual warning mechanisms don't trigger and internal workqueue state is 631182607adcSTejun Heo * largely opaque. 631282607adcSTejun Heo * 631382607adcSTejun Heo * Workqueue watchdog monitors all worker pools periodically and dumps 631482607adcSTejun Heo * state if some pools failed to make forward progress for a while where 631582607adcSTejun Heo * forward progress is defined as the first item on ->worklist changing. 631682607adcSTejun Heo * 631782607adcSTejun Heo * This mechanism is controlled through the kernel parameter 631882607adcSTejun Heo * "workqueue.watchdog_thresh" which can be updated at runtime through the 631982607adcSTejun Heo * corresponding sysfs parameter file. 632082607adcSTejun Heo */ 632182607adcSTejun Heo #ifdef CONFIG_WQ_WATCHDOG 632282607adcSTejun Heo 632382607adcSTejun Heo static unsigned long wq_watchdog_thresh = 30; 63245cd79d6aSKees Cook static struct timer_list wq_watchdog_timer; 632582607adcSTejun Heo 632682607adcSTejun Heo static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 632782607adcSTejun Heo static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 632882607adcSTejun Heo 6329cd2440d6SPetr Mladek /* 6330cd2440d6SPetr Mladek * Show workers that might prevent the processing of pending work items. 6331cd2440d6SPetr Mladek * The only candidates are CPU-bound workers in the running state. 6332cd2440d6SPetr Mladek * Pending work items should be handled by another idle worker 6333cd2440d6SPetr Mladek * in all other situations. 6334cd2440d6SPetr Mladek */ 6335cd2440d6SPetr Mladek static void show_cpu_pool_hog(struct worker_pool *pool) 6336cd2440d6SPetr Mladek { 6337cd2440d6SPetr Mladek struct worker *worker; 6338cd2440d6SPetr Mladek unsigned long flags; 6339cd2440d6SPetr Mladek int bkt; 6340cd2440d6SPetr Mladek 6341cd2440d6SPetr Mladek raw_spin_lock_irqsave(&pool->lock, flags); 6342cd2440d6SPetr Mladek 6343cd2440d6SPetr Mladek hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6344cd2440d6SPetr Mladek if (task_is_running(worker->task)) { 6345cd2440d6SPetr Mladek /* 6346cd2440d6SPetr Mladek * Defer printing to avoid deadlocks in console 6347cd2440d6SPetr Mladek * drivers that queue work while holding locks 6348cd2440d6SPetr Mladek * also taken in their write paths. 
6349cd2440d6SPetr Mladek */ 6350cd2440d6SPetr Mladek printk_deferred_enter(); 6351cd2440d6SPetr Mladek 6352cd2440d6SPetr Mladek pr_info("pool %d:\n", pool->id); 6353cd2440d6SPetr Mladek sched_show_task(worker->task); 6354cd2440d6SPetr Mladek 6355cd2440d6SPetr Mladek printk_deferred_exit(); 6356cd2440d6SPetr Mladek } 6357cd2440d6SPetr Mladek } 6358cd2440d6SPetr Mladek 6359cd2440d6SPetr Mladek raw_spin_unlock_irqrestore(&pool->lock, flags); 6360cd2440d6SPetr Mladek } 6361cd2440d6SPetr Mladek 6362cd2440d6SPetr Mladek static void show_cpu_pools_hogs(void) 6363cd2440d6SPetr Mladek { 6364cd2440d6SPetr Mladek struct worker_pool *pool; 6365cd2440d6SPetr Mladek int pi; 6366cd2440d6SPetr Mladek 6367cd2440d6SPetr Mladek pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 6368cd2440d6SPetr Mladek 6369cd2440d6SPetr Mladek rcu_read_lock(); 6370cd2440d6SPetr Mladek 6371cd2440d6SPetr Mladek for_each_pool(pool, pi) { 6372cd2440d6SPetr Mladek if (pool->cpu_stall) 6373cd2440d6SPetr Mladek show_cpu_pool_hog(pool); 6374cd2440d6SPetr Mladek 6375cd2440d6SPetr Mladek } 6376cd2440d6SPetr Mladek 6377cd2440d6SPetr Mladek rcu_read_unlock(); 6378cd2440d6SPetr Mladek } 6379cd2440d6SPetr Mladek 638082607adcSTejun Heo static void wq_watchdog_reset_touched(void) 638182607adcSTejun Heo { 638282607adcSTejun Heo int cpu; 638382607adcSTejun Heo 638482607adcSTejun Heo wq_watchdog_touched = jiffies; 638582607adcSTejun Heo for_each_possible_cpu(cpu) 638682607adcSTejun Heo per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 638782607adcSTejun Heo } 638882607adcSTejun Heo 63895cd79d6aSKees Cook static void wq_watchdog_timer_fn(struct timer_list *unused) 639082607adcSTejun Heo { 639182607adcSTejun Heo unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 639282607adcSTejun Heo bool lockup_detected = false; 6393cd2440d6SPetr Mladek bool cpu_pool_stall = false; 6394940d71c6SSergey Senozhatsky unsigned long now = jiffies; 639582607adcSTejun Heo struct worker_pool *pool; 639682607adcSTejun Heo int pi; 639782607adcSTejun Heo 639882607adcSTejun Heo if (!thresh) 639982607adcSTejun Heo return; 640082607adcSTejun Heo 640182607adcSTejun Heo rcu_read_lock(); 640282607adcSTejun Heo 640382607adcSTejun Heo for_each_pool(pool, pi) { 640482607adcSTejun Heo unsigned long pool_ts, touched, ts; 640582607adcSTejun Heo 6406cd2440d6SPetr Mladek pool->cpu_stall = false; 640782607adcSTejun Heo if (list_empty(&pool->worklist)) 640882607adcSTejun Heo continue; 640982607adcSTejun Heo 6410940d71c6SSergey Senozhatsky /* 6411940d71c6SSergey Senozhatsky * If a virtual machine is stopped by the host it can look to 6412940d71c6SSergey Senozhatsky * the watchdog like a stall. 6413940d71c6SSergey Senozhatsky */ 6414940d71c6SSergey Senozhatsky kvm_check_and_clear_guest_paused(); 6415940d71c6SSergey Senozhatsky 641682607adcSTejun Heo /* get the latest of pool and touched timestamps */ 641789e28ce6SWang Qing if (pool->cpu >= 0) 641889e28ce6SWang Qing touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 641989e28ce6SWang Qing else 642082607adcSTejun Heo touched = READ_ONCE(wq_watchdog_touched); 642189e28ce6SWang Qing pool_ts = READ_ONCE(pool->watchdog_ts); 642282607adcSTejun Heo 642382607adcSTejun Heo if (time_after(pool_ts, touched)) 642482607adcSTejun Heo ts = pool_ts; 642582607adcSTejun Heo else 642682607adcSTejun Heo ts = touched; 642782607adcSTejun Heo 642882607adcSTejun Heo /* did we stall? 
*/ 6429940d71c6SSergey Senozhatsky if (time_after(now, ts + thresh)) { 643082607adcSTejun Heo lockup_detected = true; 6431cd2440d6SPetr Mladek if (pool->cpu >= 0) { 6432cd2440d6SPetr Mladek pool->cpu_stall = true; 6433cd2440d6SPetr Mladek cpu_pool_stall = true; 6434cd2440d6SPetr Mladek } 643582607adcSTejun Heo pr_emerg("BUG: workqueue lockup - pool"); 643682607adcSTejun Heo pr_cont_pool_info(pool); 643782607adcSTejun Heo pr_cont(" stuck for %us!\n", 6438940d71c6SSergey Senozhatsky jiffies_to_msecs(now - pool_ts) / 1000); 643982607adcSTejun Heo } 6440cd2440d6SPetr Mladek 6441cd2440d6SPetr Mladek 644282607adcSTejun Heo } 644382607adcSTejun Heo 644482607adcSTejun Heo rcu_read_unlock(); 644582607adcSTejun Heo 644682607adcSTejun Heo if (lockup_detected) 644755df0933SImran Khan show_all_workqueues(); 644882607adcSTejun Heo 6449cd2440d6SPetr Mladek if (cpu_pool_stall) 6450cd2440d6SPetr Mladek show_cpu_pools_hogs(); 6451cd2440d6SPetr Mladek 645282607adcSTejun Heo wq_watchdog_reset_touched(); 645382607adcSTejun Heo mod_timer(&wq_watchdog_timer, jiffies + thresh); 645482607adcSTejun Heo } 645582607adcSTejun Heo 6456cb9d7fd5SVincent Whitchurch notrace void wq_watchdog_touch(int cpu) 645782607adcSTejun Heo { 645882607adcSTejun Heo if (cpu >= 0) 645982607adcSTejun Heo per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 646089e28ce6SWang Qing 646182607adcSTejun Heo wq_watchdog_touched = jiffies; 646282607adcSTejun Heo } 646382607adcSTejun Heo 646482607adcSTejun Heo static void wq_watchdog_set_thresh(unsigned long thresh) 646582607adcSTejun Heo { 646682607adcSTejun Heo wq_watchdog_thresh = 0; 646782607adcSTejun Heo del_timer_sync(&wq_watchdog_timer); 646882607adcSTejun Heo 646982607adcSTejun Heo if (thresh) { 647082607adcSTejun Heo wq_watchdog_thresh = thresh; 647182607adcSTejun Heo wq_watchdog_reset_touched(); 647282607adcSTejun Heo mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ); 647382607adcSTejun Heo } 647482607adcSTejun Heo } 647582607adcSTejun Heo 647682607adcSTejun Heo static int wq_watchdog_param_set_thresh(const char *val, 647782607adcSTejun Heo const struct kernel_param *kp) 647882607adcSTejun Heo { 647982607adcSTejun Heo unsigned long thresh; 648082607adcSTejun Heo int ret; 648182607adcSTejun Heo 648282607adcSTejun Heo ret = kstrtoul(val, 0, &thresh); 648382607adcSTejun Heo if (ret) 648482607adcSTejun Heo return ret; 648582607adcSTejun Heo 648682607adcSTejun Heo if (system_wq) 648782607adcSTejun Heo wq_watchdog_set_thresh(thresh); 648882607adcSTejun Heo else 648982607adcSTejun Heo wq_watchdog_thresh = thresh; 649082607adcSTejun Heo 649182607adcSTejun Heo return 0; 649282607adcSTejun Heo } 649382607adcSTejun Heo 649482607adcSTejun Heo static const struct kernel_param_ops wq_watchdog_thresh_ops = { 649582607adcSTejun Heo .set = wq_watchdog_param_set_thresh, 649682607adcSTejun Heo .get = param_get_ulong, 649782607adcSTejun Heo }; 649882607adcSTejun Heo 649982607adcSTejun Heo module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh, 650082607adcSTejun Heo 0644); 650182607adcSTejun Heo 650282607adcSTejun Heo static void wq_watchdog_init(void) 650382607adcSTejun Heo { 65045cd79d6aSKees Cook timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 650582607adcSTejun Heo wq_watchdog_set_thresh(wq_watchdog_thresh); 650682607adcSTejun Heo } 650782607adcSTejun Heo 650882607adcSTejun Heo #else /* CONFIG_WQ_WATCHDOG */ 650982607adcSTejun Heo 651082607adcSTejun Heo static inline void wq_watchdog_init(void) { } 651182607adcSTejun Heo 651282607adcSTejun Heo #endif /* 
CONFIG_WQ_WATCHDOG */ 651382607adcSTejun Heo 6514b2c562a7STejun Heo static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask) 6515b2c562a7STejun Heo { 6516b2c562a7STejun Heo if (!cpumask_intersects(wq_unbound_cpumask, mask)) { 6517b2c562a7STejun Heo pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n", 6518b2c562a7STejun Heo cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask)); 6519b2c562a7STejun Heo return; 6520b2c562a7STejun Heo } 6521b2c562a7STejun Heo 6522b2c562a7STejun Heo cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask); 6523b2c562a7STejun Heo } 6524b2c562a7STejun Heo 65253347fa09STejun Heo /** 65263347fa09STejun Heo * workqueue_init_early - early init for workqueue subsystem 65273347fa09STejun Heo * 65282930155bSTejun Heo * This is the first step of three-staged workqueue subsystem initialization and 65292930155bSTejun Heo * invoked as soon as the bare basics - memory allocation, cpumasks and idr are 65302930155bSTejun Heo * up. It sets up all the data structures and system workqueues and allows early 65312930155bSTejun Heo * boot code to create workqueues and queue/cancel work items. Actual work item 65322930155bSTejun Heo * execution starts only after kthreads can be created and scheduled right 65332930155bSTejun Heo * before early initcalls. 65343347fa09STejun Heo */ 65352333e829SYu Chen void __init workqueue_init_early(void) 65361da177e4SLinus Torvalds { 653784193c07STejun Heo struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 65387a4e344cSTejun Heo int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 65397a4e344cSTejun Heo int i, cpu; 6540c34056a3STejun Heo 654110cdb157SLai Jiangshan BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 6542e904e6c2STejun Heo 6543b05a7928SFrederic Weisbecker BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 6544b2c562a7STejun Heo cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); 6545b2c562a7STejun Heo restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ)); 6546b2c562a7STejun Heo restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN)); 6547ace3c549Stiozhang if (!cpumask_empty(&wq_cmdline_cpumask)) 6548b2c562a7STejun Heo restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask); 6549ace3c549Stiozhang 6550e904e6c2STejun Heo pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 6551e904e6c2STejun Heo 65522930155bSTejun Heo wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 65532930155bSTejun Heo BUG_ON(!wq_update_pod_attrs_buf); 65542930155bSTejun Heo 655584193c07STejun Heo /* initialize WQ_AFFN_SYSTEM pods */ 655684193c07STejun Heo pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 655784193c07STejun Heo pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); 655884193c07STejun Heo pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 655984193c07STejun Heo BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); 656084193c07STejun Heo 656184193c07STejun Heo BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); 656284193c07STejun Heo 656384193c07STejun Heo pt->nr_pods = 1; 656484193c07STejun Heo cpumask_copy(pt->pod_cpus[0], cpu_possible_mask); 656584193c07STejun Heo pt->pod_node[0] = NUMA_NO_NODE; 656684193c07STejun Heo pt->cpu_pod[0] = 0; 656784193c07STejun Heo 6568706026c2STejun Heo /* initialize CPU pools */ 656929c91e99STejun Heo for_each_possible_cpu(cpu) { 65704ce62e9eSTejun Heo 
struct worker_pool *pool; 65718b03ae3cSTejun Heo 65727a4e344cSTejun Heo i = 0; 6573f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 65747a4e344cSTejun Heo BUG_ON(init_worker_pool(pool)); 6575ec22ca5eSTejun Heo pool->cpu = cpu; 65767a4e344cSTejun Heo cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 65779546b29eSTejun Heo cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); 65787a4e344cSTejun Heo pool->attrs->nice = std_nice[i++]; 65798639ecebSTejun Heo pool->attrs->affn_strict = true; 6580f3f90ad4STejun Heo pool->node = cpu_to_node(cpu); 65817a4e344cSTejun Heo 65829daf9e67STejun Heo /* alloc pool ID */ 658368e13a67SLai Jiangshan mutex_lock(&wq_pool_mutex); 65849daf9e67STejun Heo BUG_ON(worker_pool_assign_id(pool)); 658568e13a67SLai Jiangshan mutex_unlock(&wq_pool_mutex); 65864ce62e9eSTejun Heo } 65878b03ae3cSTejun Heo } 65888b03ae3cSTejun Heo 65898a2b7538STejun Heo /* create default unbound and ordered wq attrs */ 659029c91e99STejun Heo for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 659129c91e99STejun Heo struct workqueue_attrs *attrs; 659229c91e99STejun Heo 6593be69d00dSThomas Gleixner BUG_ON(!(attrs = alloc_workqueue_attrs())); 659429c91e99STejun Heo attrs->nice = std_nice[i]; 659529c91e99STejun Heo unbound_std_wq_attrs[i] = attrs; 65968a2b7538STejun Heo 65978a2b7538STejun Heo /* 65988a2b7538STejun Heo * An ordered wq should have only one pwq as ordering is 65998a2b7538STejun Heo * guaranteed by max_active which is enforced by pwqs. 66008a2b7538STejun Heo */ 6601be69d00dSThomas Gleixner BUG_ON(!(attrs = alloc_workqueue_attrs())); 66028a2b7538STejun Heo attrs->nice = std_nice[i]; 6603af73f5c9STejun Heo attrs->ordered = true; 66048a2b7538STejun Heo ordered_wq_attrs[i] = attrs; 660529c91e99STejun Heo } 660629c91e99STejun Heo 6607d320c038STejun Heo system_wq = alloc_workqueue("events", 0, 0); 66081aabe902SJoonsoo Kim system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 6609d320c038STejun Heo system_long_wq = alloc_workqueue("events_long", 0, 0); 6610f3421797STejun Heo system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 6611636b927eSTejun Heo WQ_MAX_ACTIVE); 661224d51addSTejun Heo system_freezable_wq = alloc_workqueue("events_freezable", 661324d51addSTejun Heo WQ_FREEZABLE, 0); 66140668106cSViresh Kumar system_power_efficient_wq = alloc_workqueue("events_power_efficient", 66150668106cSViresh Kumar WQ_POWER_EFFICIENT, 0); 66167bff1820SGreg Kroah-Hartman system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 66170668106cSViresh Kumar WQ_FREEZABLE | WQ_POWER_EFFICIENT, 66180668106cSViresh Kumar 0); 66191aabe902SJoonsoo Kim BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 66200668106cSViresh Kumar !system_unbound_wq || !system_freezable_wq || 66210668106cSViresh Kumar !system_power_efficient_wq || 66220668106cSViresh Kumar !system_freezable_power_efficient_wq); 66233347fa09STejun Heo } 66243347fa09STejun Heo 6625aa6fde93STejun Heo static void __init wq_cpu_intensive_thresh_init(void) 6626aa6fde93STejun Heo { 6627aa6fde93STejun Heo unsigned long thresh; 6628aa6fde93STejun Heo unsigned long bogo; 6629aa6fde93STejun Heo 6630dd64c873SZqiang pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release"); 6631dd64c873SZqiang BUG_ON(IS_ERR(pwq_release_worker)); 6632dd64c873SZqiang 6633aa6fde93STejun Heo /* if the user set it to a specific value, keep it */ 6634aa6fde93STejun Heo if (wq_cpu_intensive_thresh_us != ULONG_MAX) 6635aa6fde93STejun Heo return; 6636aa6fde93STejun Heo 6637aa6fde93STejun Heo /* 
6638aa6fde93STejun Heo * The default of 10ms is derived from the fact that most modern (as of 6639aa6fde93STejun Heo * 2023) processors can do a lot in 10ms and that it's just below what 6640aa6fde93STejun Heo * most consider human-perceivable. However, the kernel also runs on a 6641aa6fde93STejun Heo * lot slower CPUs including microcontrollers where the threshold is way 6642aa6fde93STejun Heo * too low. 6643aa6fde93STejun Heo * 6644aa6fde93STejun Heo * Let's scale up the threshold upto 1 second if BogoMips is below 4000. 6645aa6fde93STejun Heo * This is by no means accurate but it doesn't have to be. The mechanism 6646aa6fde93STejun Heo * is still useful even when the threshold is fully scaled up. Also, as 6647aa6fde93STejun Heo * the reports would usually be applicable to everyone, some machines 6648aa6fde93STejun Heo * operating on longer thresholds won't significantly diminish their 6649aa6fde93STejun Heo * usefulness. 6650aa6fde93STejun Heo */ 6651aa6fde93STejun Heo thresh = 10 * USEC_PER_MSEC; 6652aa6fde93STejun Heo 6653aa6fde93STejun Heo /* see init/calibrate.c for lpj -> BogoMIPS calculation */ 6654aa6fde93STejun Heo bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); 6655aa6fde93STejun Heo if (bogo < 4000) 6656aa6fde93STejun Heo thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); 6657aa6fde93STejun Heo 6658aa6fde93STejun Heo pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", 6659aa6fde93STejun Heo loops_per_jiffy, bogo, thresh); 6660aa6fde93STejun Heo 6661aa6fde93STejun Heo wq_cpu_intensive_thresh_us = thresh; 6662aa6fde93STejun Heo } 6663aa6fde93STejun Heo 66643347fa09STejun Heo /** 66653347fa09STejun Heo * workqueue_init - bring workqueue subsystem fully online 66663347fa09STejun Heo * 66672930155bSTejun Heo * This is the second step of three-staged workqueue subsystem initialization 66682930155bSTejun Heo * and invoked as soon as kthreads can be created and scheduled. Workqueues have 66692930155bSTejun Heo * been created and work items queued on them, but there are no kworkers 66702930155bSTejun Heo * executing the work items yet. Populate the worker pools with the initial 66712930155bSTejun Heo * workers and enable future kworker creations. 66723347fa09STejun Heo */ 66732333e829SYu Chen void __init workqueue_init(void) 66743347fa09STejun Heo { 66752186d9f9STejun Heo struct workqueue_struct *wq; 66763347fa09STejun Heo struct worker_pool *pool; 66773347fa09STejun Heo int cpu, bkt; 66783347fa09STejun Heo 6679aa6fde93STejun Heo wq_cpu_intensive_thresh_init(); 6680aa6fde93STejun Heo 66812186d9f9STejun Heo mutex_lock(&wq_pool_mutex); 66822186d9f9STejun Heo 66832930155bSTejun Heo /* 66842930155bSTejun Heo * Per-cpu pools created earlier could be missing node hint. Fix them 66852930155bSTejun Heo * up. Also, create a rescuer for workqueues that requested it. 
66862930155bSTejun Heo */ 66872186d9f9STejun Heo for_each_possible_cpu(cpu) { 66882186d9f9STejun Heo for_each_cpu_worker_pool(pool, cpu) { 66892186d9f9STejun Heo pool->node = cpu_to_node(cpu); 66902186d9f9STejun Heo } 66912186d9f9STejun Heo } 66922186d9f9STejun Heo 669340c17f75STejun Heo list_for_each_entry(wq, &workqueues, list) { 669440c17f75STejun Heo WARN(init_rescuer(wq), 669540c17f75STejun Heo "workqueue: failed to create early rescuer for %s", 669640c17f75STejun Heo wq->name); 669740c17f75STejun Heo } 66982186d9f9STejun Heo 66992186d9f9STejun Heo mutex_unlock(&wq_pool_mutex); 67002186d9f9STejun Heo 67013347fa09STejun Heo /* create the initial workers */ 67023347fa09STejun Heo for_each_online_cpu(cpu) { 67033347fa09STejun Heo for_each_cpu_worker_pool(pool, cpu) { 67043347fa09STejun Heo pool->flags &= ~POOL_DISASSOCIATED; 67053347fa09STejun Heo BUG_ON(!create_worker(pool)); 67063347fa09STejun Heo } 67073347fa09STejun Heo } 67083347fa09STejun Heo 67093347fa09STejun Heo hash_for_each(unbound_pool_hash, bkt, pool, hash_node) 67103347fa09STejun Heo BUG_ON(!create_worker(pool)); 67113347fa09STejun Heo 67123347fa09STejun Heo wq_online = true; 671382607adcSTejun Heo wq_watchdog_init(); 67141da177e4SLinus Torvalds } 6715c4f135d6STetsuo Handa 6716025e1684STejun Heo /* 6717025e1684STejun Heo * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to 6718025e1684STejun Heo * @cpu_shares_pod(). Each subset of CPUs that share a pod is assigned a unique 6719025e1684STejun Heo * and consecutive pod ID. The rest of @pt is initialized accordingly. 6720025e1684STejun Heo */ 6721025e1684STejun Heo static void __init init_pod_type(struct wq_pod_type *pt, 6722025e1684STejun Heo bool (*cpus_share_pod)(int, int)) 6723025e1684STejun Heo { 6724025e1684STejun Heo int cur, pre, cpu, pod; 6725025e1684STejun Heo 6726025e1684STejun Heo pt->nr_pods = 0; 6727025e1684STejun Heo 6728025e1684STejun Heo /* init @pt->cpu_pod[] according to @cpus_share_pod() */ 6729025e1684STejun Heo pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6730025e1684STejun Heo BUG_ON(!pt->cpu_pod); 6731025e1684STejun Heo 6732025e1684STejun Heo for_each_possible_cpu(cur) { 6733025e1684STejun Heo for_each_possible_cpu(pre) { 6734025e1684STejun Heo if (pre >= cur) { 6735025e1684STejun Heo pt->cpu_pod[cur] = pt->nr_pods++; 6736025e1684STejun Heo break; 6737025e1684STejun Heo } 6738025e1684STejun Heo if (cpus_share_pod(cur, pre)) { 6739025e1684STejun Heo pt->cpu_pod[cur] = pt->cpu_pod[pre]; 6740025e1684STejun Heo break; 6741025e1684STejun Heo } 6742025e1684STejun Heo } 6743025e1684STejun Heo } 6744025e1684STejun Heo 6745025e1684STejun Heo /* init the rest to match @pt->cpu_pod[] */ 6746025e1684STejun Heo pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6747025e1684STejun Heo pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL); 6748025e1684STejun Heo BUG_ON(!pt->pod_cpus || !pt->pod_node); 6749025e1684STejun Heo 6750025e1684STejun Heo for (pod = 0; pod < pt->nr_pods; pod++) 6751025e1684STejun Heo BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL)); 6752025e1684STejun Heo 6753025e1684STejun Heo for_each_possible_cpu(cpu) { 6754025e1684STejun Heo cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]); 6755025e1684STejun Heo pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu); 6756025e1684STejun Heo } 6757025e1684STejun Heo } 6758025e1684STejun Heo 675963c5484eSTejun Heo static bool __init cpus_dont_share(int cpu0, int cpu1) 676063c5484eSTejun Heo { 
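	/*
	 * Never report two CPUs as sharing a pod: used for the WQ_AFFN_CPU
	 * scope below, so every CPU ends up in a pod of its own.
	 */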
676163c5484eSTejun Heo return false; 676263c5484eSTejun Heo } 676363c5484eSTejun Heo 676463c5484eSTejun Heo static bool __init cpus_share_smt(int cpu0, int cpu1) 676563c5484eSTejun Heo { 676663c5484eSTejun Heo #ifdef CONFIG_SCHED_SMT 676763c5484eSTejun Heo return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1)); 676863c5484eSTejun Heo #else 676963c5484eSTejun Heo return false; 677063c5484eSTejun Heo #endif 677163c5484eSTejun Heo } 677263c5484eSTejun Heo 6773025e1684STejun Heo static bool __init cpus_share_numa(int cpu0, int cpu1) 6774025e1684STejun Heo { 6775025e1684STejun Heo return cpu_to_node(cpu0) == cpu_to_node(cpu1); 6776025e1684STejun Heo } 6777025e1684STejun Heo 67782930155bSTejun Heo /** 67792930155bSTejun Heo * workqueue_init_topology - initialize CPU pods for unbound workqueues 67802930155bSTejun Heo * 67812930155bSTejun Heo * This is the third step of three-staged workqueue subsystem initialization and 67822930155bSTejun Heo * invoked after SMP and topology information are fully initialized. It 67832930155bSTejun Heo * initializes the unbound CPU pods accordingly. 67842930155bSTejun Heo */ 67852930155bSTejun Heo void __init workqueue_init_topology(void) 6786a86feae6STejun Heo { 67872930155bSTejun Heo struct workqueue_struct *wq; 6788025e1684STejun Heo int cpu; 6789a86feae6STejun Heo 679063c5484eSTejun Heo init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share); 679163c5484eSTejun Heo init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt); 679263c5484eSTejun Heo init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); 6793025e1684STejun Heo init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); 6794a86feae6STejun Heo 67952930155bSTejun Heo mutex_lock(&wq_pool_mutex); 6796a86feae6STejun Heo 6797a86feae6STejun Heo /* 67982930155bSTejun Heo * Workqueues allocated earlier would have all CPUs sharing the default 67992930155bSTejun Heo * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU 68002930155bSTejun Heo * combinations to apply per-pod sharing. 68012930155bSTejun Heo */ 68022930155bSTejun Heo list_for_each_entry(wq, &workqueues, list) { 68036741dd3fSGreg Kroah-Hartman for_each_online_cpu(cpu) { 68042930155bSTejun Heo wq_update_pod(wq, cpu, cpu, true); 68052930155bSTejun Heo } 68062930155bSTejun Heo } 68072930155bSTejun Heo 68082930155bSTejun Heo mutex_unlock(&wq_pool_mutex); 6809a86feae6STejun Heo } 6810a86feae6STejun Heo 681120bdedafSTetsuo Handa void __warn_flushing_systemwide_wq(void) 681220bdedafSTetsuo Handa { 681320bdedafSTetsuo Handa pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n"); 681420bdedafSTetsuo Handa dump_stack(); 681520bdedafSTetsuo Handa } 6816c4f135d6STetsuo Handa EXPORT_SYMBOL(__warn_flushing_systemwide_wq); 6817ace3c549Stiozhang 6818ace3c549Stiozhang static int __init workqueue_unbound_cpus_setup(char *str) 6819ace3c549Stiozhang { 6820ace3c549Stiozhang if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) { 6821ace3c549Stiozhang cpumask_clear(&wq_cmdline_cpumask); 6822ace3c549Stiozhang pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n"); 6823ace3c549Stiozhang } 6824ace3c549Stiozhang 6825ace3c549Stiozhang return 1; 6826ace3c549Stiozhang } 6827ace3c549Stiozhang __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup); 6828
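
/*
 * Illustrative example (the CPU range is a hypothetical value): booting with
 * "workqueue.unbound_cpus=0-3" restricts unbound workqueue workers to CPUs
 * 0-3. cpulist_parse() accepts the usual cpulist syntax, e.g. "0,2-4"; an
 * unparsable range is ignored with the warning above. Per-cpu workqueues are
 * not affected by this mask.
 */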