/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for work items which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * manager_mutex to avoid changing binding state while
	 * create_worker() is in progress.
	 */
	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	POOL_FREEZING		= 1 << 3,	/* freeze in progress */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_UNBOUND |
				  WORKER_CPU_INTENSIVE,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
	HIGHPRI_NICE_LEVEL	= -20,
};
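/*
 * For orientation, a minimal caller-side sketch of the machinery this
 * file implements (not code from this file; my_work/my_work_fn are
 * hypothetical names).  A work item is initialized once and then queued;
 * the shared pools above execute it in process context:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("executed in process context\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);	(queues on system_wq)
 *
 * or, for work better served off any particular CPU,
 * queue_work(system_unbound_wq, &my_work).
 */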

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * X: During normal operation, modification requires pool->lock and should
 *    be done only from local cpu.  Either disabling preemption on local
 *    cpu or grabbing pool->lock is enough for read access.  If
 *    POOL_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 *
 * R: workqueue_lock protected for writes.  Sched-RCU protected for reads.
 *
 * FR: wq->flush_mutex and workqueue_lock protected for writes.  Sched-RCU
 *     protected for reads.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* X: flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */

	/* nr_idle includes the ones off idle_list for rebinding */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	/* see manage_workers() for details on the two manager mutexes */
	struct mutex		manager_arb;	/* manager arbitration */
	struct mutex		manager_mutex;	/* manager exclusion */
	struct ida		worker_ida;	/* L: for worker IDs */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* W: unbound_pool_hash node */
	int			refcnt;		/* W: refcnt for unbound pools */

	/*
	 * The current concurrency level.  As it's likely to be accessed
	 * from other CPUs during try_to_wake_up(), put it in a separate
	 * cacheline.
	 */
	atomic_t		nr_running ____cacheline_aligned_in_smp;

	/*
	 * Destruction of pool is sched-RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
} ____cacheline_aligned_in_smp;

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
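/*
 * To make the packing above concrete (a sketch, not code used anywhere
 * in this file): thanks to the __aligned() attribute below, a pwq
 * pointer and the flag bits can share one word,
 *
 *	unsigned long data = (unsigned long)pwq | flags;
 *	struct pool_workqueue *p =
 *		(void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *
 * because the alignment guarantees that the low WORK_STRUCT_FLAG_BITS
 * bits of any pwq address are zero.  set_work_pwq() and get_work_pwq()
 * below apply exactly this to work->data.
 */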
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
	struct list_head	pwqs_node;	/* FR: node on wq->pwqs */
	struct list_head	mayday_node;	/* W: node on wq->maydays */

	/*
	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
	 * itself is also sched-RCU protected so that the first pwq can be
	 * determined without grabbing workqueue_lock.
	 */
	struct work_struct	unbound_release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	unsigned int		flags;		/* W: WQ_* flags */
	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
	struct list_head	pwqs;		/* FR: all pwqs of this wq */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	struct list_head	maydays;	/* W: pwqs requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* W: drain in progress */
	int			saved_max_active; /* W: saved pwq max_active */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[];		/* I: workqueue name */
};

static struct kmem_cache *pwq_cache;

/* W: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_wq_lock()						\
	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
			   lockdep_is_held(&workqueue_lock),		\
			   "sched RCU or workqueue lock should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

#define for_each_busy_worker(worker, i, pool)				\
	hash_for_each(pool->busy_hash, i, worker, hentry)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with workqueue_lock held or sched RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_wq_lock(); false; })) { }		\
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with workqueue_lock held or sched RCU read
 * locked.  If the pwq needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
		if (({ assert_rcu_or_wq_lock(); false; })) { }		\
		else
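/*
 * A sketch of the intended usage of the iterators above (do_something()
 * is a hypothetical stand-in; callers later in this file follow the same
 * pattern):
 *
 *	rcu_read_lock_sched();
 *	for_each_pwq(pwq, wq)
 *		do_something(pwq);	(pwq valid inside the section)
 *	rcu_read_unlock_sched();
 *
 * Holding workqueue_lock instead of sched-RCU is equally fine; the
 * lockdep assertion embedded in the iterators accepts either.
 */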

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
				     cpu_worker_pools);

/*
 * R: idr of all pools.  Modifications are protected by workqueue_lock.
 * Read accesses are sched-RCU protected.
 */
static DEFINE_IDR(worker_pool_idr);

static int worker_thread(void *__worker);
static void copy_workqueue_attrs(struct workqueue_attrs *to,
				 const struct workqueue_attrs *from);

/* allocate ID and assign it to @pool */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	do {
		if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
			return -ENOMEM;

		spin_lock_irq(&workqueue_lock);
		ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
		spin_unlock_irq(&workqueue_lock);
	} while (ret == -EAGAIN);

	return ret;
}

/**
 * first_pwq - return the first pool_workqueue of the specified workqueue
 * @wq: the target workqueue
 *
 * This must be called either with workqueue_lock held or sched RCU read
 * locked.  If the pwq needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pwq stays online.
 */
static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
{
	assert_rcu_or_wq_lock();
	return list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue,
				      pwqs_node);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
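/*
 * A worked example of the color helpers above (illustrative; the
 * concrete constants come from include/linux/workqueue.h):
 *
 *	work_color_to_flags(3)	== 3 << WORK_STRUCT_COLOR_SHIFT
 *	get_work_color(work)	== 3	(for a work queued with the above)
 *	work_next_color(3)	== 4
 *	work_next_color(WORK_NR_COLORS - 1) == 0	(wraps around)
 *
 * Flushing uses this round-robin of colors to tell work items queued
 * before a flush started apart from those queued after it.
 */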

/*
 * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's
 * data contain the pointer to the queued pwq.  Once execution starts, the
 * flag is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
}
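/*
 * To make the encoding concrete, a sketch of the two states of
 * work->data (field widths are illustrative; the real layout comes from
 * include/linux/workqueue.h):
 *
 *	queued:    [       pwq pointer        |PWQ| flags |PENDING]
 *	off-queue: [pool ID << OFFQ_POOL_SHIFT| OFFQ flags|PENDING?]
 *
 * So after set_work_pool_and_clear_pending(work, 5), a later
 * get_work_pool_id(work) returns 5 and get_work_pwq(work) returns NULL,
 * because %WORK_STRUCT_PWQ is no longer set.
 */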

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Return the worker_pool @work was last associated with.  %NULL if none.
 *
 * Pools are created and destroyed under workqueue_lock, and allow read
 * access under sched-RCU read lock.  As such, this function should be
 * called under workqueue_lock or with preemption disabled.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_wq_lock();

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return the worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
	return !atomic_read(&pool->nr_running);
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) &&
		atomic_read(&pool->nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}
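/*
 * A concrete reading of the policy helpers above (illustrative only):
 * nr_running counts workers currently burning CPU for this pool.  With a
 * non-empty worklist,
 *
 *	nr_running == 0  ->  need_more_worker() == true   (wake somebody)
 *	nr_running <= 1  ->  keep_working()     == true   (don't go idle)
 *	nr_running >= 2  ->  keep_working()     == false  (back off)
 *
 * which is how each pool keeps roughly one runnable worker per CPU
 * without a central scheduler.
 */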

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct worker_pool *pool)
{
	return need_to_create_worker(pool) ||
		(pool->flags & POOL_MANAGE_WORKERS);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = mutex_is_locked(&pool->manager_arb);
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	/*
	 * nr_idle and idle_list may disagree if idle rebinding is in
	 * progress.  Never return %true if idle_list is empty.
	 */
	if (list_empty(&pool->idle_list))
		return false;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
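/*
 * Worked example for too_many_workers() (numbers are illustrative): with
 * MAX_IDLE_WORKERS_RATIO == 4, the pool tolerates two idle workers
 * unconditionally plus one more per four busy ones.  For instance:
 *
 *	nr_idle == 3, nr_busy == 4:  (3 - 2) * 4 >= 4  -> true, trim one
 *	nr_idle == 3, nr_busy == 5:  (3 - 2) * 4 >= 5  -> false, keep all
 *
 * Excess idle workers are reaped by the idle timer after
 * IDLE_WORKER_TIMEOUT.
 */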

/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @pool: worker pool to wake worker from
 *
 * Wake up the first idle worker of @pool.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void wake_up_worker(struct worker_pool *pool)
{
	struct worker *worker = first_worker(pool);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING)) {
		WARN_ON_ONCE(worker->pool->cpu != cpu);
		atomic_inc(&worker->pool->nr_running);
	}
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  A worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	pool = worker->pool;

	/* this can only happen on the local cpu */
	if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
		return NULL;

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that we're bound to and
	 * running on the local cpu w/ rq lock held and preemption
	 * disabled, which in turn means that no one else could be
	 * manipulating idle_list, so dereferencing idle_list without pool
	 * lock is safe.
	 */
	if (atomic_dec_and_test(&pool->nr_running) &&
	    !list_empty(&pool->worklist))
		to_wakeup = first_worker(pool);
	return to_wakeup ? to_wakeup->task : NULL;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct worker_pool *pool = worker->pool;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		if (wakeup) {
			if (atomic_dec_and_test(&pool->nr_running) &&
			    !list_empty(&pool->worklist))
				wake_up_worker(pool);
		} else
			atomic_dec(&pool->nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(&pool->nr_running);
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current execution should match the address of @work and
 * its work function.  This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky.  A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item.  If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives.  Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item.  Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
{
	struct worker *worker;

	hash_for_each_possible(pool->busy_hash, worker, hentry,
			       (unsigned long)work)
		if (worker->current_work == work &&
		    worker->current_func == work->func)
			return worker;

	return NULL;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Obtain an extra reference on @pwq.  The caller should guarantee that
 * @pwq has positive refcnt and be holding the matching pool->lock.
 */
static void get_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	WARN_ON_ONCE(pwq->refcnt <= 0);
	pwq->refcnt++;
}

/**
 * put_pwq - put a pool_workqueue reference
 * @pwq: pool_workqueue to put
 *
 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
 * destruction.  The caller should be holding the matching pool->lock.
 */
static void put_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	if (likely(--pwq->refcnt))
		return;
	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
		return;
	/*
	 * @pwq can't be released under pool->lock, bounce to
	 * pwq_unbound_release_workfn().  This never recurses on the same
	 * pool->lock as this path is taken only for unbound workqueues and
	 * the release work item is scheduled on a per-cpu workqueue.  To
	 * avoid lockdep warning, unbound pool->locks are given lockdep
	 * subclass of 1 in get_unbound_pool().
	 */
	schedule_work(&pwq->unbound_release_work);
}
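/*
 * Reference pairing sketch for the two helpers above (illustrative;
 * both require the matching pool->lock):
 *
 *	spin_lock_irq(&pwq->pool->lock);
 *	get_pwq(pwq);		(taken while something points at @pwq)
 *	...
 *	put_pwq(pwq);		(last put schedules async release)
 *	spin_unlock_irq(&pwq->pool->lock);
 *
 * insert_work() below takes such a reference per queued work item and
 * pwq_dec_nr_in_flight() drops it when the item leaves the pwq.
 */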

static void pwq_activate_delayed_work(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);

	trace_workqueue_activate_work(work);
	move_linked_works(work, &pwq->pool->worklist, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	pwq->nr_active++;
}

static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
{
	struct work_struct *work = list_first_entry(&pwq->delayed_works,
						    struct work_struct, entry);

	pwq_activate_delayed_work(work);
}
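/*
 * How the two helpers above fit into max_active throttling (a sketch;
 * the real transitions happen in __queue_work() and
 * pwq_dec_nr_in_flight()):
 *
 *	queue:    nr_active < max_active ? run it : park on delayed_works
 *	complete: nr_active--; if delayed_works isn't empty and there is
 *		  room again, pwq_activate_first_delayed() promotes the
 *		  oldest parked item onto pool->worklist.
 *
 * "Delayed" here means throttled by max_active, not delayed_work timers.
 */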

/**
 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
 * @pwq: pwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its pwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
	/* uncolored work items don't participate in flushing or nr_active */
	if (color == WORK_NO_COLOR)
		goto out_put;

	pwq->nr_in_flight[color]--;

	pwq->nr_active--;
	if (!list_empty(&pwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (pwq->nr_active < pwq->max_active)
			pwq_activate_first_delayed(pwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(pwq->flush_color != color))
		goto out_put;

	/* are there still in-flight works? */
	if (pwq->nr_in_flight[color])
		goto out_put;

	/* this pwq is done, clear flush_color */
	pwq->flush_color = -1;

	/*
	 * If this was the last pwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
		complete(&pwq->wq->first_flusher->done);
out_put:
	put_pwq(pwq);
}

/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work.  This function can handle @work in any
 * stable state - idle, on timer or on worklist.  Return values are
 *
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	local_irq_save(*flags);

	/* try to steal the timer if it exists */
	if (is_dwork) {
		struct delayed_work *dwork = to_delayed_work(work);

		/*
		 * dwork->timer is irqsafe.  If del_timer() fails, it's
		 * guaranteed that the timer is not queued anywhere and not
		 * running on the local CPU.
		 */
		if (likely(del_timer(&dwork->timer)))
			return 1;
	}

	/* try to claim PENDING the normal way */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued.  Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1118bf4ede01STejun Heo */ 1119d565ed63STejun Heo pool = get_work_pool(work); 1120d565ed63STejun Heo if (!pool) 1121bbb68dfaSTejun Heo goto fail; 1122bf4ede01STejun Heo 1123d565ed63STejun Heo spin_lock(&pool->lock); 1124bf4ede01STejun Heo /* 1125112202d9STejun Heo * work->data is guaranteed to point to pwq only while the work 1126112202d9STejun Heo * item is queued on pwq->wq, and both updating work->data to point 1127112202d9STejun Heo * to pwq on queueing and to pool on dequeueing are done under 1128112202d9STejun Heo * pwq->pool->lock. This in turn guarantees that, if work->data 1129112202d9STejun Heo * points to pwq which is associated with a locked pool, the work 11300b3dae68SLai Jiangshan * item is currently queued on that pool. 1131bf4ede01STejun Heo */ 1132112202d9STejun Heo pwq = get_work_pwq(work); 1133112202d9STejun Heo if (pwq && pwq->pool == pool) { 1134bf4ede01STejun Heo debug_work_deactivate(work); 11353aa62497SLai Jiangshan 11363aa62497SLai Jiangshan /* 113716062836STejun Heo * A delayed work item cannot be grabbed directly because 113816062836STejun Heo * it might have linked NO_COLOR work items which, if left 1139112202d9STejun Heo * on the delayed_list, will confuse pwq->nr_active 114016062836STejun Heo * management later on and cause stall. Make sure the work 114116062836STejun Heo * item is activated before grabbing. 11423aa62497SLai Jiangshan */ 11433aa62497SLai Jiangshan if (*work_data_bits(work) & WORK_STRUCT_DELAYED) 1144112202d9STejun Heo pwq_activate_delayed_work(work); 11453aa62497SLai Jiangshan 1146bf4ede01STejun Heo list_del_init(&work->entry); 1147112202d9STejun Heo pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work)); 114836e227d2STejun Heo 1149112202d9STejun Heo /* work->data points to pwq iff queued, point to pool */ 11504468a00fSLai Jiangshan set_work_pool_and_keep_pending(work, pool->id); 11514468a00fSLai Jiangshan 1152d565ed63STejun Heo spin_unlock(&pool->lock); 115336e227d2STejun Heo return 1; 1154bf4ede01STejun Heo } 1155d565ed63STejun Heo spin_unlock(&pool->lock); 1156bbb68dfaSTejun Heo fail: 1157bbb68dfaSTejun Heo local_irq_restore(*flags); 1158bbb68dfaSTejun Heo if (work_is_canceling(work)) 1159bbb68dfaSTejun Heo return -ENOENT; 1160bbb68dfaSTejun Heo cpu_relax(); 116136e227d2STejun Heo return -EAGAIN; 1162bf4ede01STejun Heo } 1163bf4ede01STejun Heo 1164bf4ede01STejun Heo /** 1165706026c2STejun Heo * insert_work - insert a work into a pool 1166112202d9STejun Heo * @pwq: pwq @work belongs to 11674690c4abSTejun Heo * @work: work to insert 11684690c4abSTejun Heo * @head: insertion point 11694690c4abSTejun Heo * @extra_flags: extra WORK_STRUCT_* flags to set 11704690c4abSTejun Heo * 1171112202d9STejun Heo * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 1172706026c2STejun Heo * work_struct flags. 11734690c4abSTejun Heo * 11744690c4abSTejun Heo * CONTEXT: 1175d565ed63STejun Heo * spin_lock_irq(pool->lock). 
1176365970a1SDavid Howells */ 1177112202d9STejun Heo static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 1178112202d9STejun Heo struct list_head *head, unsigned int extra_flags) 1179b89deed3SOleg Nesterov { 1180112202d9STejun Heo struct worker_pool *pool = pwq->pool; 1181e1d8aa9fSFrederic Weisbecker 11824690c4abSTejun Heo /* we own @work, set data and link */ 1183112202d9STejun Heo set_work_pwq(work, pwq, extra_flags); 11841a4d9b0aSOleg Nesterov list_add_tail(&work->entry, head); 11858864b4e5STejun Heo get_pwq(pwq); 1186e22bee78STejun Heo 1187e22bee78STejun Heo /* 1188c5aa87bbSTejun Heo * Ensure either wq_worker_sleeping() sees the above 1189c5aa87bbSTejun Heo * list_add_tail() or we see zero nr_running to avoid workers lying 1190c5aa87bbSTejun Heo * around lazily while there are works to be processed. 1191e22bee78STejun Heo */ 1192e22bee78STejun Heo smp_mb(); 1193e22bee78STejun Heo 119463d95a91STejun Heo if (__need_more_worker(pool)) 119563d95a91STejun Heo wake_up_worker(pool); 1196b89deed3SOleg Nesterov } 1197b89deed3SOleg Nesterov 1198c8efcc25STejun Heo /* 1199c8efcc25STejun Heo * Test whether @work is being queued from another work executing on the 12008d03ecfeSTejun Heo * same workqueue. 1201c8efcc25STejun Heo */ 1202c8efcc25STejun Heo static bool is_chained_work(struct workqueue_struct *wq) 1203c8efcc25STejun Heo { 1204c8efcc25STejun Heo struct worker *worker; 1205c8efcc25STejun Heo 12068d03ecfeSTejun Heo worker = current_wq_worker(); 1207c8efcc25STejun Heo /* 12088d03ecfeSTejun Heo * Return %true iff I'm a worker executing a work item on @wq. If 12098d03ecfeSTejun Heo * I'm @worker, it's safe to dereference it without locking. 1210c8efcc25STejun Heo */ 1211112202d9STejun Heo return worker && worker->current_pwq->wq == wq; 1212c8efcc25STejun Heo } 1213c8efcc25STejun Heo 1214d84ff051STejun Heo static void __queue_work(int cpu, struct workqueue_struct *wq, 12151da177e4SLinus Torvalds struct work_struct *work) 12161da177e4SLinus Torvalds { 1217112202d9STejun Heo struct pool_workqueue *pwq; 1218c9178087STejun Heo struct worker_pool *last_pool; 12191e19ffc6STejun Heo struct list_head *worklist; 12208a2e8e5dSTejun Heo unsigned int work_flags; 1221b75cac93SJoonsoo Kim unsigned int req_cpu = cpu; 12228930cabaSTejun Heo 12238930cabaSTejun Heo /* 12248930cabaSTejun Heo * While a work item is PENDING && off queue, a task trying to 12258930cabaSTejun Heo * steal the PENDING will busy-loop waiting for it to either get 12268930cabaSTejun Heo * queued or lose PENDING. Grabbing PENDING and queueing should 12278930cabaSTejun Heo * happen with IRQ disabled.
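 *
 * A caller-side sketch of that protocol - this is essentially the
 * body of queue_work_on() below, reproduced here purely for
 * illustration:
 *
 *	local_irq_save(flags);
 *	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT,
 *			      work_data_bits(work)))
 *		__queue_work(cpu, wq, work);
 *	local_irq_restore(flags);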
12288930cabaSTejun Heo */ 12298930cabaSTejun Heo WARN_ON_ONCE(!irqs_disabled()); 12301da177e4SLinus Torvalds 1231dc186ad7SThomas Gleixner debug_work_activate(work); 12321e19ffc6STejun Heo 1233c8efcc25STejun Heo /* if dying, only works from the same workqueue are allowed */ 1234618b01ebSTejun Heo if (unlikely(wq->flags & __WQ_DRAINING) && 1235c8efcc25STejun Heo WARN_ON_ONCE(!is_chained_work(wq))) 1236e41e704bSTejun Heo return; 12379e8cd2f5STejun Heo retry: 1238c9178087STejun Heo /* pwq which will be used unless @work is executing elsewhere */ 1239c7fc77f7STejun Heo if (!(wq->flags & WQ_UNBOUND)) { 124057469821STejun Heo if (cpu == WORK_CPU_UNBOUND) 1241f3421797STejun Heo cpu = raw_smp_processor_id(); 1242c9178087STejun Heo pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 1243c9178087STejun Heo } else { 1244c9178087STejun Heo pwq = first_pwq(wq); 1245c9178087STejun Heo } 1246f3421797STejun Heo 124718aa9effSTejun Heo /* 1248c9178087STejun Heo * If @work was previously on a different pool, it might still be 1249c9178087STejun Heo * running there, in which case the work needs to be queued on that 1250c9178087STejun Heo * pool to guarantee non-reentrancy. 125118aa9effSTejun Heo */ 1252c9e7cf27STejun Heo last_pool = get_work_pool(work); 1253112202d9STejun Heo if (last_pool && last_pool != pwq->pool) { 125418aa9effSTejun Heo struct worker *worker; 125518aa9effSTejun Heo 1256d565ed63STejun Heo spin_lock(&last_pool->lock); 125718aa9effSTejun Heo 1258c9e7cf27STejun Heo worker = find_worker_executing_work(last_pool, work); 125918aa9effSTejun Heo 1260112202d9STejun Heo if (worker && worker->current_pwq->wq == wq) { 1261c9178087STejun Heo pwq = worker->current_pwq; 12628594fadeSLai Jiangshan } else { 126318aa9effSTejun Heo /* meh... not running there, queue here */ 1264d565ed63STejun Heo spin_unlock(&last_pool->lock); 1265112202d9STejun Heo spin_lock(&pwq->pool->lock); 126618aa9effSTejun Heo } 12678930cabaSTejun Heo } else { 1268112202d9STejun Heo spin_lock(&pwq->pool->lock); 12698930cabaSTejun Heo } 1270502ca9d8STejun Heo 12719e8cd2f5STejun Heo /* 12729e8cd2f5STejun Heo * pwq is determined and locked. For unbound pools, we could have 12739e8cd2f5STejun Heo * raced with pwq release and it could already be dead. If its 12749e8cd2f5STejun Heo * refcnt is zero, repeat pwq selection. Note that pwqs never die 12759e8cd2f5STejun Heo * without another pwq replacing it as the first pwq or while a 12769e8cd2f5STejun Heo * work item is executing on it, so the retrying is guaranteed to 12779e8cd2f5STejun Heo * make forward-progress.
12789e8cd2f5STejun Heo */ 12799e8cd2f5STejun Heo if (unlikely(!pwq->refcnt)) { 12809e8cd2f5STejun Heo if (wq->flags & WQ_UNBOUND) { 12819e8cd2f5STejun Heo spin_unlock(&pwq->pool->lock); 12829e8cd2f5STejun Heo cpu_relax(); 12839e8cd2f5STejun Heo goto retry; 12849e8cd2f5STejun Heo } 12859e8cd2f5STejun Heo /* oops */ 12869e8cd2f5STejun Heo WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 12879e8cd2f5STejun Heo wq->name, cpu); 12889e8cd2f5STejun Heo } 12899e8cd2f5STejun Heo 1290112202d9STejun Heo /* pwq determined, queue */ 1291112202d9STejun Heo trace_workqueue_queue_work(req_cpu, pwq, work); 1292502ca9d8STejun Heo 1293f5b2552bSDan Carpenter if (WARN_ON(!list_empty(&work->entry))) { 1294112202d9STejun Heo spin_unlock(&pwq->pool->lock); 1295f5b2552bSDan Carpenter return; 1296f5b2552bSDan Carpenter } 12971e19ffc6STejun Heo 1298112202d9STejun Heo pwq->nr_in_flight[pwq->work_color]++; 1299112202d9STejun Heo work_flags = work_color_to_flags(pwq->work_color); 13001e19ffc6STejun Heo 1301112202d9STejun Heo if (likely(pwq->nr_active < pwq->max_active)) { 1302cdadf009STejun Heo trace_workqueue_activate_work(work); 1303112202d9STejun Heo pwq->nr_active++; 1304112202d9STejun Heo worklist = &pwq->pool->worklist; 13058a2e8e5dSTejun Heo } else { 13068a2e8e5dSTejun Heo work_flags |= WORK_STRUCT_DELAYED; 1307112202d9STejun Heo worklist = &pwq->delayed_works; 13088a2e8e5dSTejun Heo } 13091e19ffc6STejun Heo 1310112202d9STejun Heo insert_work(pwq, work, worklist, work_flags); 13111e19ffc6STejun Heo 1312112202d9STejun Heo spin_unlock(&pwq->pool->lock); 13131da177e4SLinus Torvalds } 13141da177e4SLinus Torvalds 13150fcb78c2SRolf Eike Beer /** 1316c1a220e7SZhang Rui * queue_work_on - queue work on specific cpu 1317c1a220e7SZhang Rui * @cpu: CPU number to execute work on 1318c1a220e7SZhang Rui * @wq: workqueue to use 1319c1a220e7SZhang Rui * @work: work to queue 1320c1a220e7SZhang Rui * 1321d4283e93STejun Heo * Returns %false if @work was already on a queue, %true otherwise. 1322c1a220e7SZhang Rui * 1323c1a220e7SZhang Rui * We queue the work to a specific CPU, the caller must ensure it 1324c1a220e7SZhang Rui * can't go away. 
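 *
 * A minimal usage sketch (hypothetical caller code, not part of this
 * file; my_work_fn and my_work are made-up names, and CPU 1 is
 * assumed to stay online):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running on CPU %d\n", raw_smp_processor_id());
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	if (!queue_work_on(1, system_wq, &my_work))
 *		pr_debug("my_work was already pending\n");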
1325c1a220e7SZhang Rui */ 1326d4283e93STejun Heo bool queue_work_on(int cpu, struct workqueue_struct *wq, 1327d4283e93STejun Heo struct work_struct *work) 1328c1a220e7SZhang Rui { 1329d4283e93STejun Heo bool ret = false; 13308930cabaSTejun Heo unsigned long flags; 13318930cabaSTejun Heo 13328930cabaSTejun Heo local_irq_save(flags); 1333c1a220e7SZhang Rui 133422df02bbSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 13354690c4abSTejun Heo __queue_work(cpu, wq, work); 1336d4283e93STejun Heo ret = true; 1337c1a220e7SZhang Rui } 13388930cabaSTejun Heo 13398930cabaSTejun Heo local_irq_restore(flags); 1340c1a220e7SZhang Rui return ret; 1341c1a220e7SZhang Rui } 1342c1a220e7SZhang Rui EXPORT_SYMBOL_GPL(queue_work_on); 1343c1a220e7SZhang Rui 1344d8e794dfSTejun Heo void delayed_work_timer_fn(unsigned long __data) 13451da177e4SLinus Torvalds { 134652bad64dSDavid Howells struct delayed_work *dwork = (struct delayed_work *)__data; 13471da177e4SLinus Torvalds 1348e0aecdd8STejun Heo /* should have been called from irqsafe timer with irq already off */ 134960c057bcSLai Jiangshan __queue_work(dwork->cpu, dwork->wq, &dwork->work); 13501da177e4SLinus Torvalds } 13511438ade5SKonstantin Khlebnikov EXPORT_SYMBOL(delayed_work_timer_fn); 13521da177e4SLinus Torvalds 13537beb2edfSTejun Heo static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 135452bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 13551da177e4SLinus Torvalds { 13567beb2edfSTejun Heo struct timer_list *timer = &dwork->timer; 13577beb2edfSTejun Heo struct work_struct *work = &dwork->work; 13581da177e4SLinus Torvalds 13597beb2edfSTejun Heo WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 13607beb2edfSTejun Heo timer->data != (unsigned long)dwork); 1361fc4b514fSTejun Heo WARN_ON_ONCE(timer_pending(timer)); 1362fc4b514fSTejun Heo WARN_ON_ONCE(!list_empty(&work->entry)); 13637beb2edfSTejun Heo 13648852aac2STejun Heo /* 13658852aac2STejun Heo * If @delay is 0, queue @dwork->work immediately. This is for 13668852aac2STejun Heo * both optimization and correctness. The earliest @timer can 13678852aac2STejun Heo * expire is on the closest next tick and delayed_work users depend 13688852aac2STejun Heo * on there being no such delay when @delay is 0. 13698852aac2STejun Heo */ 13708852aac2STejun Heo if (!delay) { 13718852aac2STejun Heo __queue_work(cpu, wq, &dwork->work); 13728852aac2STejun Heo return; 13738852aac2STejun Heo } 13748852aac2STejun Heo 13757beb2edfSTejun Heo timer_stats_timer_set_start_info(&dwork->timer); 13767beb2edfSTejun Heo 137760c057bcSLai Jiangshan dwork->wq = wq; 13781265057fSTejun Heo dwork->cpu = cpu; 13797beb2edfSTejun Heo timer->expires = jiffies + delay; 13807beb2edfSTejun Heo 13817beb2edfSTejun Heo if (unlikely(cpu != WORK_CPU_UNBOUND)) 13827beb2edfSTejun Heo add_timer_on(timer, cpu); 13837beb2edfSTejun Heo else 13847beb2edfSTejun Heo add_timer(timer); 13857beb2edfSTejun Heo } 13861da177e4SLinus Torvalds 13870fcb78c2SRolf Eike Beer /** 13880fcb78c2SRolf Eike Beer * queue_delayed_work_on - queue work on specific CPU after delay 13890fcb78c2SRolf Eike Beer * @cpu: CPU number to execute work on 13900fcb78c2SRolf Eike Beer * @wq: workqueue to use 1391af9997e4SRandy Dunlap * @dwork: work to queue 13920fcb78c2SRolf Eike Beer * @delay: number of jiffies to wait before queueing 13930fcb78c2SRolf Eike Beer * 1394715f1300STejun Heo * Returns %false if @dwork was already on a queue, %true otherwise.
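 *
 * A usage sketch with hypothetical names (see also the note on zero
 * @delay that follows). This queues &my_dwork on system_wq after
 * roughly one second, on whichever CPU the timer fires on:
 *
 *	static void my_dwork_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork, HZ);
 *
 * Re-arming (debouncing) an already pending delayed work is done with
 * mod_delayed_work_on(), defined further below.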
If 1395715f1300STejun Heo * @delay is zero and @dwork is idle, it will be scheduled for immediate 1396715f1300STejun Heo * execution. 13970fcb78c2SRolf Eike Beer */ 1398d4283e93STejun Heo bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 139952bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 14007a6bc1cdSVenkatesh Pallipadi { 140152bad64dSDavid Howells struct work_struct *work = &dwork->work; 1402d4283e93STejun Heo bool ret = false; 14038930cabaSTejun Heo unsigned long flags; 14048930cabaSTejun Heo 14058930cabaSTejun Heo /* read the comment in __queue_work() */ 14068930cabaSTejun Heo local_irq_save(flags); 14077a6bc1cdSVenkatesh Pallipadi 140822df02bbSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 14097beb2edfSTejun Heo __queue_delayed_work(cpu, wq, dwork, delay); 1410d4283e93STejun Heo ret = true; 14117a6bc1cdSVenkatesh Pallipadi } 14128930cabaSTejun Heo 14138930cabaSTejun Heo local_irq_restore(flags); 14147a6bc1cdSVenkatesh Pallipadi return ret; 14157a6bc1cdSVenkatesh Pallipadi } 1416ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work_on); 14171da177e4SLinus Torvalds 1418c8e55f36STejun Heo /** 14198376fe22STejun Heo * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 14208376fe22STejun Heo * @cpu: CPU number to execute work on 14218376fe22STejun Heo * @wq: workqueue to use 14228376fe22STejun Heo * @dwork: work to queue 14238376fe22STejun Heo * @delay: number of jiffies to wait before queueing 14248376fe22STejun Heo * 14258376fe22STejun Heo * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 14268376fe22STejun Heo * modify @dwork's timer so that it expires after @delay. If @delay is 14278376fe22STejun Heo * zero, @work is guaranteed to be scheduled immediately regardless of its 14288376fe22STejun Heo * current state. 14298376fe22STejun Heo * 14308376fe22STejun Heo * Returns %false if @dwork was idle and queued, %true if @dwork was 14318376fe22STejun Heo * pending and its timer was modified. 14328376fe22STejun Heo * 1433e0aecdd8STejun Heo * This function is safe to call from any context including IRQ handler. 14348376fe22STejun Heo * See try_to_grab_pending() for details. 14358376fe22STejun Heo */ 14368376fe22STejun Heo bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 14378376fe22STejun Heo struct delayed_work *dwork, unsigned long delay) 14388376fe22STejun Heo { 14398376fe22STejun Heo unsigned long flags; 14408376fe22STejun Heo int ret; 14418376fe22STejun Heo 14428376fe22STejun Heo do { 14438376fe22STejun Heo ret = try_to_grab_pending(&dwork->work, true, &flags); 14448376fe22STejun Heo } while (unlikely(ret == -EAGAIN)); 14458376fe22STejun Heo 14468376fe22STejun Heo if (likely(ret >= 0)) { 14478376fe22STejun Heo __queue_delayed_work(cpu, wq, dwork, delay); 14488376fe22STejun Heo local_irq_restore(flags); 14498376fe22STejun Heo } 14508376fe22STejun Heo 14518376fe22STejun Heo /* -ENOENT from try_to_grab_pending() becomes %true */ 14528376fe22STejun Heo return ret; 14538376fe22STejun Heo } 14548376fe22STejun Heo EXPORT_SYMBOL_GPL(mod_delayed_work_on); 14558376fe22STejun Heo 14568376fe22STejun Heo /** 1457c8e55f36STejun Heo * worker_enter_idle - enter idle state 1458c8e55f36STejun Heo * @worker: worker which is entering idle state 1459c8e55f36STejun Heo * 1460c8e55f36STejun Heo * @worker is entering idle state. Update stats and idle timer if 1461c8e55f36STejun Heo * necessary. 
1462c8e55f36STejun Heo * 1463c8e55f36STejun Heo * LOCKING: 1464d565ed63STejun Heo * spin_lock_irq(pool->lock). 1465c8e55f36STejun Heo */ 1466c8e55f36STejun Heo static void worker_enter_idle(struct worker *worker) 14671da177e4SLinus Torvalds { 1468bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 1469c8e55f36STejun Heo 14706183c009STejun Heo if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 14716183c009STejun Heo WARN_ON_ONCE(!list_empty(&worker->entry) && 14726183c009STejun Heo (worker->hentry.next || worker->hentry.pprev))) 14736183c009STejun Heo return; 1474c8e55f36STejun Heo 1475cb444766STejun Heo /* can't use worker_set_flags(), also called from start_worker() */ 1476cb444766STejun Heo worker->flags |= WORKER_IDLE; 1477bd7bdd43STejun Heo pool->nr_idle++; 1478e22bee78STejun Heo worker->last_active = jiffies; 1479c8e55f36STejun Heo 1480c8e55f36STejun Heo /* idle_list is LIFO */ 1481bd7bdd43STejun Heo list_add(&worker->entry, &pool->idle_list); 1482db7bccf4STejun Heo 148363d95a91STejun Heo if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 1484628c78e7STejun Heo mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 1485cb444766STejun Heo 1486544ecf31STejun Heo /* 1487706026c2STejun Heo * Sanity check nr_running. Because wq_unbind_fn() releases 1488d565ed63STejun Heo * pool->lock between setting %WORKER_UNBOUND and zapping 1489628c78e7STejun Heo * nr_running, the warning may trigger spuriously. Check iff 1490628c78e7STejun Heo * unbind is not in progress. 1491544ecf31STejun Heo */ 149224647570STejun Heo WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 1493bd7bdd43STejun Heo pool->nr_workers == pool->nr_idle && 1494e19e397aSTejun Heo atomic_read(&pool->nr_running)); 1495c8e55f36STejun Heo } 1496c8e55f36STejun Heo 1497c8e55f36STejun Heo /** 1498c8e55f36STejun Heo * worker_leave_idle - leave idle state 1499c8e55f36STejun Heo * @worker: worker which is leaving idle state 1500c8e55f36STejun Heo * 1501c8e55f36STejun Heo * @worker is leaving idle state. Update stats. 1502c8e55f36STejun Heo * 1503c8e55f36STejun Heo * LOCKING: 1504d565ed63STejun Heo * spin_lock_irq(pool->lock). 1505c8e55f36STejun Heo */ 1506c8e55f36STejun Heo static void worker_leave_idle(struct worker *worker) 1507c8e55f36STejun Heo { 1508bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 1509c8e55f36STejun Heo 15106183c009STejun Heo if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 15116183c009STejun Heo return; 1512d302f017STejun Heo worker_clr_flags(worker, WORKER_IDLE); 1513bd7bdd43STejun Heo pool->nr_idle--; 1514c8e55f36STejun Heo list_del_init(&worker->entry); 1515c8e55f36STejun Heo } 1516c8e55f36STejun Heo 1517e22bee78STejun Heo /** 1518f36dc67bSLai Jiangshan * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it 1519f36dc67bSLai Jiangshan * @pool: target worker_pool 1520f36dc67bSLai Jiangshan * 1521f36dc67bSLai Jiangshan * Bind %current to the cpu of @pool if it is associated and lock @pool. 1522e22bee78STejun Heo * 1523e22bee78STejun Heo * Works which are scheduled while the cpu is online must at least be 1524e22bee78STejun Heo * scheduled to a worker which is bound to the cpu so that if they are 1525e22bee78STejun Heo * flushed from cpu callbacks while cpu is going down, they are 1526e22bee78STejun Heo * guaranteed to execute on the cpu. 
1527e22bee78STejun Heo * 1528f5faa077SLai Jiangshan * This function is to be used by unbound workers and rescuers to bind 1529e22bee78STejun Heo * themselves to the target cpu and may race with cpu going down or 1530e22bee78STejun Heo * coming online. kthread_bind() can't be used because it may put the 1531e22bee78STejun Heo * worker on an already dead cpu and set_cpus_allowed_ptr() can't be used 1532706026c2STejun Heo * verbatim as it's best effort and blocking and pool may be 1533e22bee78STejun Heo * [dis]associated in the meantime. 1534e22bee78STejun Heo * 1535706026c2STejun Heo * This function tries set_cpus_allowed(), locks pool, and verifies the 153624647570STejun Heo * binding against %POOL_DISASSOCIATED which is set during 1537f2d5a0eeSTejun Heo * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker 1538f2d5a0eeSTejun Heo * enters idle state or fetches works without dropping lock, it can 1539f2d5a0eeSTejun Heo * guarantee the scheduling requirement described in the first paragraph. 1540e22bee78STejun Heo * 1541e22bee78STejun Heo * CONTEXT: 1542d565ed63STejun Heo * Might sleep. Called without any lock but returns with pool->lock 1543e22bee78STejun Heo * held. 1544e22bee78STejun Heo * 1545e22bee78STejun Heo * RETURNS: 1546706026c2STejun Heo * %true if the associated pool is online (@worker is successfully 1547e22bee78STejun Heo * bound), %false if offline. 1548e22bee78STejun Heo */ 1549f36dc67bSLai Jiangshan static bool worker_maybe_bind_and_lock(struct worker_pool *pool) 1550d565ed63STejun Heo __acquires(&pool->lock) 1551e22bee78STejun Heo { 1552e22bee78STejun Heo while (true) { 1553e22bee78STejun Heo /* 1554e22bee78STejun Heo * The following call may fail, succeed or succeed 1555e22bee78STejun Heo * without actually migrating the task to the cpu if 1556e22bee78STejun Heo * it races with cpu hotunplug operation. Verify 155724647570STejun Heo * against POOL_DISASSOCIATED. 1558e22bee78STejun Heo */ 155924647570STejun Heo if (!(pool->flags & POOL_DISASSOCIATED)) 15607a4e344cSTejun Heo set_cpus_allowed_ptr(current, pool->attrs->cpumask); 1561e22bee78STejun Heo 1562d565ed63STejun Heo spin_lock_irq(&pool->lock); 156324647570STejun Heo if (pool->flags & POOL_DISASSOCIATED) 1564e22bee78STejun Heo return false; 1565f5faa077SLai Jiangshan if (task_cpu(current) == pool->cpu && 15667a4e344cSTejun Heo cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask)) 1567e22bee78STejun Heo return true; 1568d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1569e22bee78STejun Heo 15705035b20fSTejun Heo /* 15715035b20fSTejun Heo * We've raced with CPU hot[un]plug. Give it a breather 15725035b20fSTejun Heo * and retry migration. cond_resched() is required here; 15735035b20fSTejun Heo * otherwise, we might deadlock against cpu_stop trying to 15745035b20fSTejun Heo * bring down the CPU on non-preemptive kernel. 15755035b20fSTejun Heo */ 1576e22bee78STejun Heo cpu_relax(); 15775035b20fSTejun Heo cond_resched(); 1578e22bee78STejun Heo } 1579e22bee78STejun Heo } 1580e22bee78STejun Heo 1581e22bee78STejun Heo /* 1582ea1abd61SLai Jiangshan * Rebind an idle @worker to its CPU. worker_thread() will test 15835f7dabfdSLai Jiangshan * list_empty(@worker->entry) before leaving idle and call this function.
158425511a47STejun Heo */ 158525511a47STejun Heo static void idle_worker_rebind(struct worker *worker) 158625511a47STejun Heo { 15875f7dabfdSLai Jiangshan /* CPU may go down again in between, clear UNBOUND only on success */ 1588f36dc67bSLai Jiangshan if (worker_maybe_bind_and_lock(worker->pool)) 15895f7dabfdSLai Jiangshan worker_clr_flags(worker, WORKER_UNBOUND); 159025511a47STejun Heo 1591ea1abd61SLai Jiangshan /* rebind complete, become available again */ 1592ea1abd61SLai Jiangshan list_add(&worker->entry, &worker->pool->idle_list); 1593d565ed63STejun Heo spin_unlock_irq(&worker->pool->lock); 159425511a47STejun Heo } 159525511a47STejun Heo 159625511a47STejun Heo /* 159725511a47STejun Heo * Function for @worker->rebind.work used to rebind unbound busy workers to 1598403c821dSTejun Heo * the associated cpu which is coming back online. This is scheduled by 1599403c821dSTejun Heo * cpu up but can race with other cpu hotplug operations and may be 1600403c821dSTejun Heo * executed twice without intervening cpu down. 1601e22bee78STejun Heo */ 160225511a47STejun Heo static void busy_worker_rebind_fn(struct work_struct *work) 1603e22bee78STejun Heo { 1604e22bee78STejun Heo struct worker *worker = container_of(work, struct worker, rebind_work); 1605e22bee78STejun Heo 1606f36dc67bSLai Jiangshan if (worker_maybe_bind_and_lock(worker->pool)) 1607eab6d828SLai Jiangshan worker_clr_flags(worker, WORKER_UNBOUND); 1608e22bee78STejun Heo 1609d565ed63STejun Heo spin_unlock_irq(&worker->pool->lock); 1610e22bee78STejun Heo } 1611e22bee78STejun Heo 161225511a47STejun Heo /** 161394cf58bbSTejun Heo * rebind_workers - rebind all workers of a pool to the associated CPU 161494cf58bbSTejun Heo * @pool: pool of interest 161525511a47STejun Heo * 161694cf58bbSTejun Heo * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding 161725511a47STejun Heo * is different for idle and busy ones. 161825511a47STejun Heo * 1619ea1abd61SLai Jiangshan * Idle ones will be removed from the idle_list and woken up. They will 1620ea1abd61SLai Jiangshan * add themselves back after completing rebind. This ensures that the 1621ea1abd61SLai Jiangshan * idle_list doesn't contain any unbound workers when re-bound busy workers 1622ea1abd61SLai Jiangshan * try to perform local wake-ups for concurrency management. 162325511a47STejun Heo * 1624ea1abd61SLai Jiangshan * Busy workers can rebind after they finish their current work items. 1625ea1abd61SLai Jiangshan * Queueing the rebind work item at the head of the scheduled list is 1626ea1abd61SLai Jiangshan * enough. Note that nr_running will be properly bumped as busy workers 1627ea1abd61SLai Jiangshan * rebind. 162825511a47STejun Heo * 1629ea1abd61SLai Jiangshan * On return, all non-manager workers are scheduled for rebind - see 1630ea1abd61SLai Jiangshan * manage_workers() for the manager special case. Any idle worker 1631ea1abd61SLai Jiangshan * including the manager will not appear on @idle_list until rebind is 1632ea1abd61SLai Jiangshan * complete, making local wake-ups safe.
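 *
 * (Both the idle and the busy path perform the actual rebinding by
 * calling worker_maybe_bind_and_lock() above.)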
163325511a47STejun Heo */ 163494cf58bbSTejun Heo static void rebind_workers(struct worker_pool *pool) 163525511a47STejun Heo { 1636ea1abd61SLai Jiangshan struct worker *worker, *n; 163725511a47STejun Heo int i; 163825511a47STejun Heo 1639bc3a1afcSTejun Heo lockdep_assert_held(&pool->manager_mutex); 1640d565ed63STejun Heo lockdep_assert_held(&pool->lock); 164125511a47STejun Heo 16425f7dabfdSLai Jiangshan /* dequeue and kick idle ones */ 1643ea1abd61SLai Jiangshan list_for_each_entry_safe(worker, n, &pool->idle_list, entry) { 1644ea1abd61SLai Jiangshan /* 164594cf58bbSTejun Heo * idle workers should be off @pool->idle_list until rebind 164694cf58bbSTejun Heo * is complete to avoid receiving premature local wake-ups. 1647ea1abd61SLai Jiangshan */ 1648ea1abd61SLai Jiangshan list_del_init(&worker->entry); 164925511a47STejun Heo 165025511a47STejun Heo /* 165194cf58bbSTejun Heo * worker_thread() will see the above dequeuing and call 165294cf58bbSTejun Heo * idle_worker_rebind(). 165325511a47STejun Heo */ 165425511a47STejun Heo wake_up_process(worker->task); 165525511a47STejun Heo } 165625511a47STejun Heo 1657ea1abd61SLai Jiangshan /* rebind busy workers */ 1658b67bfe0dSSasha Levin for_each_busy_worker(worker, i, pool) { 165925511a47STejun Heo struct work_struct *rebind_work = &worker->rebind_work; 1660e2b6a6d5SJoonsoo Kim struct workqueue_struct *wq; 166125511a47STejun Heo 166225511a47STejun Heo if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, 166325511a47STejun Heo work_data_bits(rebind_work))) 166425511a47STejun Heo continue; 166525511a47STejun Heo 166625511a47STejun Heo debug_work_activate(rebind_work); 166790beca5dSTejun Heo 166890beca5dSTejun Heo /* 166994cf58bbSTejun Heo * wq doesn't really matter but let's keep @worker->pool 1670112202d9STejun Heo * and @pwq->pool consistent for sanity. 167190beca5dSTejun Heo */ 16727a4e344cSTejun Heo if (worker->pool->attrs->nice < 0) 1673e2b6a6d5SJoonsoo Kim wq = system_highpri_wq; 1674e2b6a6d5SJoonsoo Kim else 1675e2b6a6d5SJoonsoo Kim wq = system_wq; 1676ec58815aSTejun Heo 16777fb98ea7STejun Heo insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work, 167825511a47STejun Heo worker->scheduled.next, 167925511a47STejun Heo work_color_to_flags(WORK_NO_COLOR)); 1680ec58815aSTejun Heo } 168125511a47STejun Heo } 168225511a47STejun Heo 1683c34056a3STejun Heo static struct worker *alloc_worker(void) 1684c34056a3STejun Heo { 1685c34056a3STejun Heo struct worker *worker; 1686c34056a3STejun Heo 1687c34056a3STejun Heo worker = kzalloc(sizeof(*worker), GFP_KERNEL); 1688c8e55f36STejun Heo if (worker) { 1689c8e55f36STejun Heo INIT_LIST_HEAD(&worker->entry); 1690affee4b2STejun Heo INIT_LIST_HEAD(&worker->scheduled); 169125511a47STejun Heo INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn); 1692e22bee78STejun Heo /* on creation a worker is in !idle && prep state */ 1693e22bee78STejun Heo worker->flags = WORKER_PREP; 1694c8e55f36STejun Heo } 1695c34056a3STejun Heo return worker; 1696c34056a3STejun Heo } 1697c34056a3STejun Heo 1698c34056a3STejun Heo /** 1699c34056a3STejun Heo * create_worker - create a new workqueue worker 170063d95a91STejun Heo * @pool: pool the new worker will belong to 1701c34056a3STejun Heo * 170263d95a91STejun Heo * Create a new worker which is bound to @pool. The returned worker 1703c34056a3STejun Heo * can be started by calling start_worker() or destroyed using 1704c34056a3STejun Heo * destroy_worker(). 1705c34056a3STejun Heo * 1706c34056a3STejun Heo * CONTEXT: 1707c34056a3STejun Heo * Might sleep. Does GFP_KERNEL allocations. 
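 *
 * See create_and_start_worker() below for the canonical create +
 * start sequence performed under pool->manager_mutex.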
1708c34056a3STejun Heo * 1709c34056a3STejun Heo * RETURNS: 1710c34056a3STejun Heo * Pointer to the newly created worker. 1711c34056a3STejun Heo */ 1712bc2ae0f5STejun Heo static struct worker *create_worker(struct worker_pool *pool) 1713c34056a3STejun Heo { 17147a4e344cSTejun Heo const char *pri = pool->attrs->nice < 0 ? "H" : ""; 1715c34056a3STejun Heo struct worker *worker = NULL; 1716f3421797STejun Heo int id = -1; 1717c34056a3STejun Heo 1718cd549687STejun Heo lockdep_assert_held(&pool->manager_mutex); 1719cd549687STejun Heo 1720d565ed63STejun Heo spin_lock_irq(&pool->lock); 1721bd7bdd43STejun Heo while (ida_get_new(&pool->worker_ida, &id)) { 1722d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1723bd7bdd43STejun Heo if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL)) 1724c34056a3STejun Heo goto fail; 1725d565ed63STejun Heo spin_lock_irq(&pool->lock); 1726c34056a3STejun Heo } 1727d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1728c34056a3STejun Heo 1729c34056a3STejun Heo worker = alloc_worker(); 1730c34056a3STejun Heo if (!worker) 1731c34056a3STejun Heo goto fail; 1732c34056a3STejun Heo 1733bd7bdd43STejun Heo worker->pool = pool; 1734c34056a3STejun Heo worker->id = id; 1735c34056a3STejun Heo 173629c91e99STejun Heo if (pool->cpu >= 0) 173794dcf29aSEric Dumazet worker->task = kthread_create_on_node(worker_thread, 1738ec22ca5eSTejun Heo worker, cpu_to_node(pool->cpu), 1739d84ff051STejun Heo "kworker/%d:%d%s", pool->cpu, id, pri); 1740f3421797STejun Heo else 1741f3421797STejun Heo worker->task = kthread_create(worker_thread, worker, 1742ac6104cdSTejun Heo "kworker/u%d:%d%s", 1743ac6104cdSTejun Heo pool->id, id, pri); 1744c34056a3STejun Heo if (IS_ERR(worker->task)) 1745c34056a3STejun Heo goto fail; 1746c34056a3STejun Heo 1747c5aa87bbSTejun Heo /* 1748c5aa87bbSTejun Heo * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any 1749c5aa87bbSTejun Heo * online CPUs. It'll be re-applied when any of the CPUs come up. 1750c5aa87bbSTejun Heo */ 17517a4e344cSTejun Heo set_user_nice(worker->task, pool->attrs->nice); 17527a4e344cSTejun Heo set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); 17533270476aSTejun Heo 1754db7bccf4STejun Heo /* 17557a4e344cSTejun Heo * %PF_THREAD_BOUND is used to prevent userland from meddling with 17567a4e344cSTejun Heo * cpumask of workqueue workers. This is an abuse. We need 17577a4e344cSTejun Heo * %PF_NO_SETAFFINITY. 1758db7bccf4STejun Heo */ 1759db7bccf4STejun Heo worker->task->flags |= PF_THREAD_BOUND; 17607a4e344cSTejun Heo 17617a4e344cSTejun Heo /* 17627a4e344cSTejun Heo * The caller is responsible for ensuring %POOL_DISASSOCIATED 17637a4e344cSTejun Heo * remains stable across this function. See the comments above the 17647a4e344cSTejun Heo * flag definition for details. 
17657a4e344cSTejun Heo */ 17667a4e344cSTejun Heo if (pool->flags & POOL_DISASSOCIATED) 1767f3421797STejun Heo worker->flags |= WORKER_UNBOUND; 1768c34056a3STejun Heo 1769c34056a3STejun Heo return worker; 1770c34056a3STejun Heo fail: 1771c34056a3STejun Heo if (id >= 0) { 1772d565ed63STejun Heo spin_lock_irq(&pool->lock); 1773bd7bdd43STejun Heo ida_remove(&pool->worker_ida, id); 1774d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1775c34056a3STejun Heo } 1776c34056a3STejun Heo kfree(worker); 1777c34056a3STejun Heo return NULL; 1778c34056a3STejun Heo } 1779c34056a3STejun Heo 1780c34056a3STejun Heo /** 1781c34056a3STejun Heo * start_worker - start a newly created worker 1782c34056a3STejun Heo * @worker: worker to start 1783c34056a3STejun Heo * 1784706026c2STejun Heo * Make the pool aware of @worker and start it. 1785c34056a3STejun Heo * 1786c34056a3STejun Heo * CONTEXT: 1787d565ed63STejun Heo * spin_lock_irq(pool->lock). 1788c34056a3STejun Heo */ 1789c34056a3STejun Heo static void start_worker(struct worker *worker) 1790c34056a3STejun Heo { 1791cb444766STejun Heo worker->flags |= WORKER_STARTED; 1792bd7bdd43STejun Heo worker->pool->nr_workers++; 1793c8e55f36STejun Heo worker_enter_idle(worker); 1794c34056a3STejun Heo wake_up_process(worker->task); 1795c34056a3STejun Heo } 1796c34056a3STejun Heo 1797c34056a3STejun Heo /** 1798ebf44d16STejun Heo * create_and_start_worker - create and start a worker for a pool 1799ebf44d16STejun Heo * @pool: the target pool 1800ebf44d16STejun Heo * 1801cd549687STejun Heo * Grab the managership of @pool and create and start a new worker for it. 1802ebf44d16STejun Heo */ 1803ebf44d16STejun Heo static int create_and_start_worker(struct worker_pool *pool) 1804ebf44d16STejun Heo { 1805ebf44d16STejun Heo struct worker *worker; 1806ebf44d16STejun Heo 1807cd549687STejun Heo mutex_lock(&pool->manager_mutex); 1808cd549687STejun Heo 1809ebf44d16STejun Heo worker = create_worker(pool); 1810ebf44d16STejun Heo if (worker) { 1811ebf44d16STejun Heo spin_lock_irq(&pool->lock); 1812ebf44d16STejun Heo start_worker(worker); 1813ebf44d16STejun Heo spin_unlock_irq(&pool->lock); 1814ebf44d16STejun Heo } 1815ebf44d16STejun Heo 1816cd549687STejun Heo mutex_unlock(&pool->manager_mutex); 1817cd549687STejun Heo 1818ebf44d16STejun Heo return worker ? 0 : -ENOMEM; 1819ebf44d16STejun Heo } 1820ebf44d16STejun Heo 1821ebf44d16STejun Heo /** 1822c34056a3STejun Heo * destroy_worker - destroy a workqueue worker 1823c34056a3STejun Heo * @worker: worker to be destroyed 1824c34056a3STejun Heo * 1825706026c2STejun Heo * Destroy @worker and adjust @pool stats accordingly. 1826c8e55f36STejun Heo * 1827c8e55f36STejun Heo * CONTEXT: 1828d565ed63STejun Heo * spin_lock_irq(pool->lock) which is released and regrabbed. 
1829c34056a3STejun Heo */ 1830c34056a3STejun Heo static void destroy_worker(struct worker *worker) 1831c34056a3STejun Heo { 1832bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 1833c34056a3STejun Heo int id = worker->id; 1834c34056a3STejun Heo 1835cd549687STejun Heo lockdep_assert_held(&pool->manager_mutex); 1836cd549687STejun Heo lockdep_assert_held(&pool->lock); 1837cd549687STejun Heo 1838c34056a3STejun Heo /* sanity check frenzy */ 18396183c009STejun Heo if (WARN_ON(worker->current_work) || 18406183c009STejun Heo WARN_ON(!list_empty(&worker->scheduled))) 18416183c009STejun Heo return; 1842c34056a3STejun Heo 1843c8e55f36STejun Heo if (worker->flags & WORKER_STARTED) 1844bd7bdd43STejun Heo pool->nr_workers--; 1845c8e55f36STejun Heo if (worker->flags & WORKER_IDLE) 1846bd7bdd43STejun Heo pool->nr_idle--; 1847c8e55f36STejun Heo 1848c8e55f36STejun Heo list_del_init(&worker->entry); 1849cb444766STejun Heo worker->flags |= WORKER_DIE; 1850c8e55f36STejun Heo 1851d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1852c8e55f36STejun Heo 1853c34056a3STejun Heo kthread_stop(worker->task); 1854c34056a3STejun Heo kfree(worker); 1855c34056a3STejun Heo 1856d565ed63STejun Heo spin_lock_irq(&pool->lock); 1857bd7bdd43STejun Heo ida_remove(&pool->worker_ida, id); 1858c34056a3STejun Heo } 1859c34056a3STejun Heo 186063d95a91STejun Heo static void idle_worker_timeout(unsigned long __pool) 1861e22bee78STejun Heo { 186263d95a91STejun Heo struct worker_pool *pool = (void *)__pool; 1863e22bee78STejun Heo 1864d565ed63STejun Heo spin_lock_irq(&pool->lock); 1865e22bee78STejun Heo 186663d95a91STejun Heo if (too_many_workers(pool)) { 1867e22bee78STejun Heo struct worker *worker; 1868e22bee78STejun Heo unsigned long expires; 1869e22bee78STejun Heo 1870e22bee78STejun Heo /* idle_list is kept in LIFO order, check the last one */ 187163d95a91STejun Heo worker = list_entry(pool->idle_list.prev, struct worker, entry); 1872e22bee78STejun Heo expires = worker->last_active + IDLE_WORKER_TIMEOUT; 1873e22bee78STejun Heo 1874e22bee78STejun Heo if (time_before(jiffies, expires)) 187563d95a91STejun Heo mod_timer(&pool->idle_timer, expires); 1876e22bee78STejun Heo else { 1877e22bee78STejun Heo /* it's been idle for too long, wake up manager */ 187811ebea50STejun Heo pool->flags |= POOL_MANAGE_WORKERS; 187963d95a91STejun Heo wake_up_worker(pool); 1880e22bee78STejun Heo } 1881e22bee78STejun Heo } 1882e22bee78STejun Heo 1883d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1884e22bee78STejun Heo } 1885e22bee78STejun Heo 1886493a1724STejun Heo static void send_mayday(struct work_struct *work) 1887e22bee78STejun Heo { 1888112202d9STejun Heo struct pool_workqueue *pwq = get_work_pwq(work); 1889112202d9STejun Heo struct workqueue_struct *wq = pwq->wq; 1890493a1724STejun Heo 1891493a1724STejun Heo lockdep_assert_held(&workqueue_lock); 1892e22bee78STejun Heo 1893493008a8STejun Heo if (!wq->rescuer) 1894493a1724STejun Heo return; 1895e22bee78STejun Heo 1896e22bee78STejun Heo /* mayday mayday mayday */ 1897493a1724STejun Heo if (list_empty(&pwq->mayday_node)) { 1898493a1724STejun Heo list_add_tail(&pwq->mayday_node, &wq->maydays); 1899e22bee78STejun Heo wake_up_process(wq->rescuer->task); 1900493a1724STejun Heo } 1901e22bee78STejun Heo } 1902e22bee78STejun Heo 1903706026c2STejun Heo static void pool_mayday_timeout(unsigned long __pool) 1904e22bee78STejun Heo { 190563d95a91STejun Heo struct worker_pool *pool = (void *)__pool; 1906e22bee78STejun Heo struct work_struct *work; 1907e22bee78STejun Heo 1908493a1724STejun Heo 
spin_lock_irq(&workqueue_lock); /* for wq->maydays */ 1909493a1724STejun Heo spin_lock(&pool->lock); 1910e22bee78STejun Heo 191163d95a91STejun Heo if (need_to_create_worker(pool)) { 1912e22bee78STejun Heo /* 1913e22bee78STejun Heo * We've been trying to create a new worker but 1914e22bee78STejun Heo * haven't been successful. We might be hitting an 1915e22bee78STejun Heo * allocation deadlock. Send distress signals to 1916e22bee78STejun Heo * rescuers. 1917e22bee78STejun Heo */ 191863d95a91STejun Heo list_for_each_entry(work, &pool->worklist, entry) 1919e22bee78STejun Heo send_mayday(work); 1920e22bee78STejun Heo } 1921e22bee78STejun Heo 1922493a1724STejun Heo spin_unlock(&pool->lock); 1923493a1724STejun Heo spin_unlock_irq(&workqueue_lock); 1924e22bee78STejun Heo 192563d95a91STejun Heo mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 1926e22bee78STejun Heo } 1927e22bee78STejun Heo 1928e22bee78STejun Heo /** 1929e22bee78STejun Heo * maybe_create_worker - create a new worker if necessary 193063d95a91STejun Heo * @pool: pool to create a new worker for 1931e22bee78STejun Heo * 193263d95a91STejun Heo * Create a new worker for @pool if necessary. @pool is guaranteed to 1933e22bee78STejun Heo * have at least one idle worker on return from this function. If 1934e22bee78STejun Heo * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 193563d95a91STejun Heo * sent to all rescuers with works scheduled on @pool to resolve 1936e22bee78STejun Heo * possible allocation deadlock. 1937e22bee78STejun Heo * 1938c5aa87bbSTejun Heo * On return, need_to_create_worker() is guaranteed to be %false and 1939c5aa87bbSTejun Heo * may_start_working() %true. 1940e22bee78STejun Heo * 1941e22bee78STejun Heo * LOCKING: 1942d565ed63STejun Heo * spin_lock_irq(pool->lock) which may be released and regrabbed 1943e22bee78STejun Heo * multiple times. Does GFP_KERNEL allocations. Called only from 1944e22bee78STejun Heo * manager. 1945e22bee78STejun Heo * 1946e22bee78STejun Heo * RETURNS: 1947c5aa87bbSTejun Heo * %false if no action was taken and pool->lock stayed locked, %true 1948e22bee78STejun Heo * otherwise. 
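 *
 * (If creation keeps failing, pool_mayday_timeout() above keeps
 * re-sending maydays every MAYDAY_INTERVAL until create_worker()
 * finally succeeds and this function deletes the mayday timer.)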
1949e22bee78STejun Heo */ 195063d95a91STejun Heo static bool maybe_create_worker(struct worker_pool *pool) 1951d565ed63STejun Heo __releases(&pool->lock) 1952d565ed63STejun Heo __acquires(&pool->lock) 1953e22bee78STejun Heo { 195463d95a91STejun Heo if (!need_to_create_worker(pool)) 1955e22bee78STejun Heo return false; 1956e22bee78STejun Heo restart: 1957d565ed63STejun Heo spin_unlock_irq(&pool->lock); 19589f9c2364STejun Heo 1959e22bee78STejun Heo /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 196063d95a91STejun Heo mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 1961e22bee78STejun Heo 1962e22bee78STejun Heo while (true) { 1963e22bee78STejun Heo struct worker *worker; 1964e22bee78STejun Heo 1965bc2ae0f5STejun Heo worker = create_worker(pool); 1966e22bee78STejun Heo if (worker) { 196763d95a91STejun Heo del_timer_sync(&pool->mayday_timer); 1968d565ed63STejun Heo spin_lock_irq(&pool->lock); 1969e22bee78STejun Heo start_worker(worker); 19706183c009STejun Heo if (WARN_ON_ONCE(need_to_create_worker(pool))) 19716183c009STejun Heo goto restart; 1972e22bee78STejun Heo return true; 1973e22bee78STejun Heo } 1974e22bee78STejun Heo 197563d95a91STejun Heo if (!need_to_create_worker(pool)) 1976e22bee78STejun Heo break; 1977e22bee78STejun Heo 1978e22bee78STejun Heo __set_current_state(TASK_INTERRUPTIBLE); 1979e22bee78STejun Heo schedule_timeout(CREATE_COOLDOWN); 19809f9c2364STejun Heo 198163d95a91STejun Heo if (!need_to_create_worker(pool)) 1982e22bee78STejun Heo break; 1983e22bee78STejun Heo } 1984e22bee78STejun Heo 198563d95a91STejun Heo del_timer_sync(&pool->mayday_timer); 1986d565ed63STejun Heo spin_lock_irq(&pool->lock); 198763d95a91STejun Heo if (need_to_create_worker(pool)) 1988e22bee78STejun Heo goto restart; 1989e22bee78STejun Heo return true; 1990e22bee78STejun Heo } 1991e22bee78STejun Heo 1992e22bee78STejun Heo /** 1993e22bee78STejun Heo * maybe_destroy_worker - destroy workers which have been idle for a while 199463d95a91STejun Heo * @pool: pool to destroy workers for 1995e22bee78STejun Heo * 199663d95a91STejun Heo * Destroy @pool workers which have been idle for longer than 1997e22bee78STejun Heo * IDLE_WORKER_TIMEOUT. 1998e22bee78STejun Heo * 1999e22bee78STejun Heo * LOCKING: 2000d565ed63STejun Heo * spin_lock_irq(pool->lock) which may be released and regrabbed 2001e22bee78STejun Heo * multiple times. Called only from manager. 2002e22bee78STejun Heo * 2003e22bee78STejun Heo * RETURNS: 2004c5aa87bbSTejun Heo * %false if no action was taken and pool->lock stayed locked, %true 2005e22bee78STejun Heo * otherwise. 
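 *
 * (Culling is initiated by idle_worker_timeout() above, which sets
 * POOL_MANAGE_WORKERS and wakes a worker so that the manager path
 * eventually ends up here.)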
2006e22bee78STejun Heo */ 200763d95a91STejun Heo static bool maybe_destroy_workers(struct worker_pool *pool) 2008e22bee78STejun Heo { 2009e22bee78STejun Heo bool ret = false; 2010e22bee78STejun Heo 201163d95a91STejun Heo while (too_many_workers(pool)) { 2012e22bee78STejun Heo struct worker *worker; 2013e22bee78STejun Heo unsigned long expires; 2014e22bee78STejun Heo 201563d95a91STejun Heo worker = list_entry(pool->idle_list.prev, struct worker, entry); 2016e22bee78STejun Heo expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2017e22bee78STejun Heo 2018e22bee78STejun Heo if (time_before(jiffies, expires)) { 201963d95a91STejun Heo mod_timer(&pool->idle_timer, expires); 2020e22bee78STejun Heo break; 2021e22bee78STejun Heo } 2022e22bee78STejun Heo 2023e22bee78STejun Heo destroy_worker(worker); 2024e22bee78STejun Heo ret = true; 2025e22bee78STejun Heo } 2026e22bee78STejun Heo 2027e22bee78STejun Heo return ret; 2028e22bee78STejun Heo } 2029e22bee78STejun Heo 2030e22bee78STejun Heo /** 2031e22bee78STejun Heo * manage_workers - manage worker pool 2032e22bee78STejun Heo * @worker: self 2033e22bee78STejun Heo * 2034706026c2STejun Heo * Assume the manager role and manage the worker pool @worker belongs 2035e22bee78STejun Heo * to. At any given time, there can be only zero or one manager per 2036706026c2STejun Heo * pool. The exclusion is handled automatically by this function. 2037e22bee78STejun Heo * 2038e22bee78STejun Heo * The caller can safely start processing works on false return. On 2039e22bee78STejun Heo * true return, it's guaranteed that need_to_create_worker() is false 2040e22bee78STejun Heo * and may_start_working() is true. 2041e22bee78STejun Heo * 2042e22bee78STejun Heo * CONTEXT: 2043d565ed63STejun Heo * spin_lock_irq(pool->lock) which may be released and regrabbed 2044e22bee78STejun Heo * multiple times. Does GFP_KERNEL allocations. 2045e22bee78STejun Heo * 2046e22bee78STejun Heo * RETURNS: 2047d565ed63STejun Heo * %false if the pool doesn't need management and the caller can safely 2048d565ed63STejun Heo * start processing works, %true if management was performed and the 2049e22bee78STejun Heo * conditions the caller verified before calling may no longer hold. 2049e22bee78STejun Heo */ 2050e22bee78STejun Heo static bool manage_workers(struct worker *worker) 2051e22bee78STejun Heo { 205263d95a91STejun Heo struct worker_pool *pool = worker->pool; 2053e22bee78STejun Heo bool ret = false; 2054e22bee78STejun Heo 2055bc3a1afcSTejun Heo /* 2056bc3a1afcSTejun Heo * Managership is governed by two mutexes - manager_arb and 2057bc3a1afcSTejun Heo * manager_mutex. manager_arb handles arbitration of manager role. 2058bc3a1afcSTejun Heo * Anyone who successfully grabs manager_arb wins the arbitration 2059bc3a1afcSTejun Heo * and becomes the manager. mutex_trylock() on pool->manager_arb 2060bc3a1afcSTejun Heo * failure while holding pool->lock reliably indicates that someone 2061bc3a1afcSTejun Heo * else is managing the pool and the worker which failed trylock 2062bc3a1afcSTejun Heo * can proceed to executing work items. This means that anyone 2063bc3a1afcSTejun Heo * grabbing manager_arb is responsible for actually performing 2064bc3a1afcSTejun Heo * manager duties. If manager_arb is grabbed and released without 2065bc3a1afcSTejun Heo * actual management, the pool may stall indefinitely. 2066bc3a1afcSTejun Heo * 2067bc3a1afcSTejun Heo * manager_mutex is used for exclusion of actual management 2068bc3a1afcSTejun Heo * operations.
The holder of manager_mutex can be sure that no 2069bc3a1afcSTejun Heo * management operation, including creation and destruction of 2070bc3a1afcSTejun Heo * workers, will take place until the mutex is released. Because 2071bc3a1afcSTejun Heo * manager_mutex doesn't interfere with manager role arbitration, 2072bc3a1afcSTejun Heo * it is guaranteed that the pool's management, while it may be 2073bc3a1afcSTejun Heo * delayed, won't be disturbed by someone else grabbing 2074bc3a1afcSTejun Heo * manager_mutex. 2075bc3a1afcSTejun Heo */ 207634a06bd6STejun Heo if (!mutex_trylock(&pool->manager_arb)) 2077e22bee78STejun Heo return ret; 2078e22bee78STejun Heo 2079ee378aa4SLai Jiangshan /* 2080bc3a1afcSTejun Heo * With manager arbitration won, manager_mutex would be free in 2081bc3a1afcSTejun Heo * most cases. trylock first without dropping @pool->lock. 2082ee378aa4SLai Jiangshan */ 2083bc3a1afcSTejun Heo if (unlikely(!mutex_trylock(&pool->manager_mutex))) { 2084d565ed63STejun Heo spin_unlock_irq(&pool->lock); 2085bc3a1afcSTejun Heo mutex_lock(&pool->manager_mutex); 2086ee378aa4SLai Jiangshan /* 2087ee378aa4SLai Jiangshan * CPU hotplug could have happened while we were waiting 2088b2eb83d1SLai Jiangshan * for manager_mutex. Hotplug itself can't handle us 2089ee378aa4SLai Jiangshan * because the manager is on neither the idle nor the busy list, and 2090706026c2STejun Heo * @pool's state and ours could have deviated. 2091ee378aa4SLai Jiangshan * 2092bc3a1afcSTejun Heo * As hotplug is now excluded via manager_mutex, we can 2093ee378aa4SLai Jiangshan * simply try to bind. It will succeed or fail depending 2094706026c2STejun Heo * on @pool's current state. Try it and adjust 2095ee378aa4SLai Jiangshan * %WORKER_UNBOUND accordingly. 2096ee378aa4SLai Jiangshan */ 2097f36dc67bSLai Jiangshan if (worker_maybe_bind_and_lock(pool)) 2098ee378aa4SLai Jiangshan worker->flags &= ~WORKER_UNBOUND; 2099ee378aa4SLai Jiangshan else 2100ee378aa4SLai Jiangshan worker->flags |= WORKER_UNBOUND; 2101ee378aa4SLai Jiangshan 2102ee378aa4SLai Jiangshan ret = true; 2103ee378aa4SLai Jiangshan } 2104ee378aa4SLai Jiangshan 210511ebea50STejun Heo pool->flags &= ~POOL_MANAGE_WORKERS; 2106e22bee78STejun Heo 2107e22bee78STejun Heo /* 2108e22bee78STejun Heo * Destroy and then create so that may_start_working() is true 2109e22bee78STejun Heo * on return. 2110e22bee78STejun Heo */ 211163d95a91STejun Heo ret |= maybe_destroy_workers(pool); 211263d95a91STejun Heo ret |= maybe_create_worker(pool); 2113e22bee78STejun Heo 2114bc3a1afcSTejun Heo mutex_unlock(&pool->manager_mutex); 211534a06bd6STejun Heo mutex_unlock(&pool->manager_arb); 2116e22bee78STejun Heo return ret; 2117e22bee78STejun Heo } 2118e22bee78STejun Heo 2119a62428c0STejun Heo /** 2120a62428c0STejun Heo * process_one_work - process single work 2121c34056a3STejun Heo * @worker: self 2122a62428c0STejun Heo * @work: work to process 2123a62428c0STejun Heo * 2124a62428c0STejun Heo * Process @work. This function contains all the logic necessary to 2125a62428c0STejun Heo * process a single work including synchronization against and 2126a62428c0STejun Heo * interaction with other workers on the same cpu, queueing and 2127a62428c0STejun Heo * flushing. As long as the context requirement is met, any worker can 2128a62428c0STejun Heo * call this function to process a work. 2129a62428c0STejun Heo * 2130a62428c0STejun Heo * CONTEXT: 2131d565ed63STejun Heo * spin_lock_irq(pool->lock) which is released and regrabbed.
2132a62428c0STejun Heo */ 2133c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work) 2134d565ed63STejun Heo __releases(&pool->lock) 2135d565ed63STejun Heo __acquires(&pool->lock) 21361da177e4SLinus Torvalds { 2137112202d9STejun Heo struct pool_workqueue *pwq = get_work_pwq(work); 2138bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 2139112202d9STejun Heo bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; 214073f53c4aSTejun Heo int work_color; 21417e11629dSTejun Heo struct worker *collision; 21424e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP 21434e6045f1SJohannes Berg /* 2144a62428c0STejun Heo * It is permissible to free the struct work_struct from 2145a62428c0STejun Heo * inside the function that is called from it, this we need to 2146a62428c0STejun Heo * take into account for lockdep too. To avoid bogus "held 2147a62428c0STejun Heo * lock freed" warnings as well as problems when looking into 2148a62428c0STejun Heo * work->lockdep_map, make a copy and use that here. 21494e6045f1SJohannes Berg */ 21504d82a1deSPeter Zijlstra struct lockdep_map lockdep_map; 21514d82a1deSPeter Zijlstra 21524d82a1deSPeter Zijlstra lockdep_copy_map(&lockdep_map, &work->lockdep_map); 21534e6045f1SJohannes Berg #endif 21546fec10a1STejun Heo /* 21556fec10a1STejun Heo * Ensure we're on the correct CPU. DISASSOCIATED test is 21566fec10a1STejun Heo * necessary to avoid spurious warnings from rescuers servicing the 215724647570STejun Heo * unbound or a disassociated pool. 21586fec10a1STejun Heo */ 21595f7dabfdSLai Jiangshan WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) && 216024647570STejun Heo !(pool->flags & POOL_DISASSOCIATED) && 2161ec22ca5eSTejun Heo raw_smp_processor_id() != pool->cpu); 216225511a47STejun Heo 21637e11629dSTejun Heo /* 21647e11629dSTejun Heo * A single work shouldn't be executed concurrently by 21657e11629dSTejun Heo * multiple workers on a single cpu. Check whether anyone is 21667e11629dSTejun Heo * already processing the work. If so, defer the work to the 21677e11629dSTejun Heo * currently executing one. 21687e11629dSTejun Heo */ 2169c9e7cf27STejun Heo collision = find_worker_executing_work(pool, work); 21707e11629dSTejun Heo if (unlikely(collision)) { 21717e11629dSTejun Heo move_linked_works(work, &collision->scheduled, NULL); 21727e11629dSTejun Heo return; 21737e11629dSTejun Heo } 21741da177e4SLinus Torvalds 21758930cabaSTejun Heo /* claim and dequeue */ 21761da177e4SLinus Torvalds debug_work_deactivate(work); 2177c9e7cf27STejun Heo hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 2178c34056a3STejun Heo worker->current_work = work; 2179a2c1c57bSTejun Heo worker->current_func = work->func; 2180112202d9STejun Heo worker->current_pwq = pwq; 218173f53c4aSTejun Heo work_color = get_work_color(work); 21827a22ad75STejun Heo 2183a62428c0STejun Heo list_del_init(&work->entry); 2184a62428c0STejun Heo 2185649027d7STejun Heo /* 2186fb0e7bebSTejun Heo * CPU intensive works don't participate in concurrency 2187fb0e7bebSTejun Heo * management. They're the scheduler's responsibility. 2188fb0e7bebSTejun Heo */ 2189fb0e7bebSTejun Heo if (unlikely(cpu_intensive)) 2190fb0e7bebSTejun Heo worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); 2191fb0e7bebSTejun Heo 2192974271c4STejun Heo /* 2193d565ed63STejun Heo * Unbound pool isn't concurrency managed and work items should be 2194974271c4STejun Heo * executed ASAP. Wake up another worker if necessary. 
2195974271c4STejun Heo */ 219663d95a91STejun Heo if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) 219763d95a91STejun Heo wake_up_worker(pool); 2198974271c4STejun Heo 21998930cabaSTejun Heo /* 22007c3eed5cSTejun Heo * Record the last pool and clear PENDING which should be the last 2201d565ed63STejun Heo * update to @work. Also, do this inside @pool->lock so that 220223657bb1STejun Heo * PENDING and queued state changes happen together while IRQ is 220323657bb1STejun Heo * disabled. 22048930cabaSTejun Heo */ 22057c3eed5cSTejun Heo set_work_pool_and_clear_pending(work, pool->id); 22061da177e4SLinus Torvalds 2207d565ed63STejun Heo spin_unlock_irq(&pool->lock); 2208365970a1SDavid Howells 2209112202d9STejun Heo lock_map_acquire_read(&pwq->wq->lockdep_map); 22103295f0efSIngo Molnar lock_map_acquire(&lockdep_map); 2211e36c886aSArjan van de Ven trace_workqueue_execute_start(work); 2212a2c1c57bSTejun Heo worker->current_func(work); 2213e36c886aSArjan van de Ven /* 2214e36c886aSArjan van de Ven * While we must be careful to not use "work" after this, the trace 2215e36c886aSArjan van de Ven * point will only record its address. 2216e36c886aSArjan van de Ven */ 2217e36c886aSArjan van de Ven trace_workqueue_execute_end(work); 22183295f0efSIngo Molnar lock_map_release(&lockdep_map); 2219112202d9STejun Heo lock_map_release(&pwq->wq->lockdep_map); 22201da177e4SLinus Torvalds 2221d5abe669SPeter Zijlstra if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2222044c782cSValentin Ilie pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2223044c782cSValentin Ilie " last function: %pf\n", 2224a2c1c57bSTejun Heo current->comm, preempt_count(), task_pid_nr(current), 2225a2c1c57bSTejun Heo worker->current_func); 2226d5abe669SPeter Zijlstra debug_show_held_locks(current); 2227d5abe669SPeter Zijlstra dump_stack(); 2228d5abe669SPeter Zijlstra } 2229d5abe669SPeter Zijlstra 2230d565ed63STejun Heo spin_lock_irq(&pool->lock); 2231a62428c0STejun Heo 2232fb0e7bebSTejun Heo /* clear cpu intensive status */ 2233fb0e7bebSTejun Heo if (unlikely(cpu_intensive)) 2234fb0e7bebSTejun Heo worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2235fb0e7bebSTejun Heo 2236a62428c0STejun Heo /* we're done with it, release */ 223742f8570fSSasha Levin hash_del(&worker->hentry); 2238c34056a3STejun Heo worker->current_work = NULL; 2239a2c1c57bSTejun Heo worker->current_func = NULL; 2240112202d9STejun Heo worker->current_pwq = NULL; 2241112202d9STejun Heo pwq_dec_nr_in_flight(pwq, work_color); 22421da177e4SLinus Torvalds } 22431da177e4SLinus Torvalds 2244affee4b2STejun Heo /** 2245affee4b2STejun Heo * process_scheduled_works - process scheduled works 2246affee4b2STejun Heo * @worker: self 2247affee4b2STejun Heo * 2248affee4b2STejun Heo * Process all scheduled works. Please note that the scheduled list 2249affee4b2STejun Heo * may change while processing a work, so this function repeatedly 2250affee4b2STejun Heo * fetches a work from the top and executes it. 2251affee4b2STejun Heo * 2252affee4b2STejun Heo * CONTEXT: 2253d565ed63STejun Heo * spin_lock_irq(pool->lock) which may be released and regrabbed 2254affee4b2STejun Heo * multiple times. 
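 *
 * (This is also why the loop below re-fetches the first entry on
 * every iteration instead of using a list_for_each_entry() style
 * walk.)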
2255affee4b2STejun Heo */ 2256affee4b2STejun Heo static void process_scheduled_works(struct worker *worker) 22571da177e4SLinus Torvalds { 2258affee4b2STejun Heo while (!list_empty(&worker->scheduled)) { 2259affee4b2STejun Heo struct work_struct *work = list_first_entry(&worker->scheduled, 2260a62428c0STejun Heo struct work_struct, entry); 2261c34056a3STejun Heo process_one_work(worker, work); 2262a62428c0STejun Heo } 22631da177e4SLinus Torvalds } 22641da177e4SLinus Torvalds 22654690c4abSTejun Heo /** 22664690c4abSTejun Heo * worker_thread - the worker thread function 2267c34056a3STejun Heo * @__worker: self 22684690c4abSTejun Heo * 2269c5aa87bbSTejun Heo * The worker thread function. All workers belong to a worker_pool - 2270c5aa87bbSTejun Heo * either a per-cpu one or dynamic unbound one. These workers process all 2271c5aa87bbSTejun Heo * work items regardless of their specific target workqueue. The only 2272c5aa87bbSTejun Heo * exception is work items which belong to workqueues with a rescuer which 2273c5aa87bbSTejun Heo * will be explained in rescuer_thread(). 22744690c4abSTejun Heo */ 2275c34056a3STejun Heo static int worker_thread(void *__worker) 22761da177e4SLinus Torvalds { 2277c34056a3STejun Heo struct worker *worker = __worker; 2278bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 22791da177e4SLinus Torvalds 2280e22bee78STejun Heo /* tell the scheduler that this is a workqueue worker */ 2281e22bee78STejun Heo worker->task->flags |= PF_WQ_WORKER; 2282c8e55f36STejun Heo woke_up: 2283d565ed63STejun Heo spin_lock_irq(&pool->lock); 2284affee4b2STejun Heo 22855f7dabfdSLai Jiangshan /* we are off idle list if destruction or rebind is requested */ 22865f7dabfdSLai Jiangshan if (unlikely(list_empty(&worker->entry))) { 2287d565ed63STejun Heo spin_unlock_irq(&pool->lock); 228825511a47STejun Heo 22895f7dabfdSLai Jiangshan /* if DIE is set, destruction is requested */ 229025511a47STejun Heo if (worker->flags & WORKER_DIE) { 2291e22bee78STejun Heo worker->task->flags &= ~PF_WQ_WORKER; 2292c8e55f36STejun Heo return 0; 2293c8e55f36STejun Heo } 2294c8e55f36STejun Heo 22955f7dabfdSLai Jiangshan /* otherwise, rebind */ 229625511a47STejun Heo idle_worker_rebind(worker); 229725511a47STejun Heo goto woke_up; 229825511a47STejun Heo } 229925511a47STejun Heo 2300c8e55f36STejun Heo worker_leave_idle(worker); 2301db7bccf4STejun Heo recheck: 2302e22bee78STejun Heo /* no more worker necessary? */ 230363d95a91STejun Heo if (!need_more_worker(pool)) 2304e22bee78STejun Heo goto sleep; 2305e22bee78STejun Heo 2306e22bee78STejun Heo /* do we need to manage? */ 230763d95a91STejun Heo if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2308e22bee78STejun Heo goto recheck; 2309e22bee78STejun Heo 2310c8e55f36STejun Heo /* 2311c8e55f36STejun Heo * ->scheduled list can only be filled while a worker is 2312c8e55f36STejun Heo * preparing to process a work or actually processing it. 2313c8e55f36STejun Heo * Make sure nobody diddled with it while I was sleeping. 2314c8e55f36STejun Heo */ 23156183c009STejun Heo WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2316c8e55f36STejun Heo 2317e22bee78STejun Heo /* 2318e22bee78STejun Heo * When control reaches this point, we're guaranteed to have 2319e22bee78STejun Heo * at least one idle worker or that someone else has already 2320e22bee78STejun Heo * assumed the manager role. 
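/*
 * Illustrative sketch of the predicate behind need_more_worker() and
 * keep_working() used in this loop: concurrency management aims at
 * exactly one runnable worker per pool while work is pending.  Roughly
 * (simplified; the real helpers also special-case unbound pools):
 */
static bool pool_needs_runnable_worker(struct worker_pool *pool)
{
	/* pending work and no worker currently burning CPU for the pool */
	return !list_empty(&pool->worklist) &&
	       !atomic_read(&pool->nr_running);
}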
2321e22bee78STejun Heo */ 2322e22bee78STejun Heo worker_clr_flags(worker, WORKER_PREP); 2323e22bee78STejun Heo 2324e22bee78STejun Heo do { 2325affee4b2STejun Heo struct work_struct *work = 2326bd7bdd43STejun Heo list_first_entry(&pool->worklist, 2327affee4b2STejun Heo struct work_struct, entry); 2328affee4b2STejun Heo 2329c8e55f36STejun Heo if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { 2330affee4b2STejun Heo /* optimization path, not strictly necessary */ 2331affee4b2STejun Heo process_one_work(worker, work); 2332affee4b2STejun Heo if (unlikely(!list_empty(&worker->scheduled))) 2333affee4b2STejun Heo process_scheduled_works(worker); 2334affee4b2STejun Heo } else { 2335c8e55f36STejun Heo move_linked_works(work, &worker->scheduled, NULL); 2336affee4b2STejun Heo process_scheduled_works(worker); 2337affee4b2STejun Heo } 233863d95a91STejun Heo } while (keep_working(pool)); 2339affee4b2STejun Heo 2340e22bee78STejun Heo worker_set_flags(worker, WORKER_PREP, false); 2341d313dd85STejun Heo sleep: 234263d95a91STejun Heo if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker)) 2343e22bee78STejun Heo goto recheck; 2344d313dd85STejun Heo 2345c8e55f36STejun Heo /* 2346d565ed63STejun Heo * pool->lock is held and there's no work to process and no need to 2347d565ed63STejun Heo * manage, sleep. Workers are woken up only while holding 2348d565ed63STejun Heo * pool->lock or from local cpu, so setting the current state 2349d565ed63STejun Heo * before releasing pool->lock is enough to prevent losing any 2350d565ed63STejun Heo * event. 2351c8e55f36STejun Heo */ 2352c8e55f36STejun Heo worker_enter_idle(worker); 2353c8e55f36STejun Heo __set_current_state(TASK_INTERRUPTIBLE); 2354d565ed63STejun Heo spin_unlock_irq(&pool->lock); 23551da177e4SLinus Torvalds schedule(); 2356c8e55f36STejun Heo goto woke_up; 23571da177e4SLinus Torvalds } 23581da177e4SLinus Torvalds 2359e22bee78STejun Heo /** 2360e22bee78STejun Heo * rescuer_thread - the rescuer thread function 2361111c225aSTejun Heo * @__rescuer: self 2362e22bee78STejun Heo * 2363e22bee78STejun Heo * Workqueue rescuer thread function. There's one rescuer for each 2364493008a8STejun Heo * workqueue which has WQ_MEM_RECLAIM set. 2365e22bee78STejun Heo * 2366706026c2STejun Heo * Regular work processing on a pool may block trying to create a new 2367e22bee78STejun Heo * worker, which uses a GFP_KERNEL allocation that has a slight chance of 2368e22bee78STejun Heo * developing into a deadlock if some works currently on the same queue 2369e22bee78STejun Heo * need to be processed to satisfy the GFP_KERNEL allocation. This is 2370e22bee78STejun Heo * the problem the rescuer solves. 2371e22bee78STejun Heo * 2372706026c2STejun Heo * When such a condition is possible, the pool summons the rescuers of all 2373706026c2STejun Heo * workqueues which have works queued on the pool and lets them process 2374e22bee78STejun Heo * those works so that forward progress can be guaranteed. 2375e22bee78STejun Heo * 2376e22bee78STejun Heo * This should happen rarely. 2377e22bee78STejun Heo */ 2378111c225aSTejun Heo static int rescuer_thread(void *__rescuer) 2379e22bee78STejun Heo { 2380111c225aSTejun Heo struct worker *rescuer = __rescuer; 2381111c225aSTejun Heo struct workqueue_struct *wq = rescuer->rescue_wq; 2382e22bee78STejun Heo struct list_head *scheduled = &rescuer->scheduled; 2383e22bee78STejun Heo 2384e22bee78STejun Heo set_user_nice(current, RESCUER_NICE_LEVEL); 2385111c225aSTejun Heo 2386111c225aSTejun Heo /* 2387111c225aSTejun Heo * Mark rescuer as worker too.
As WORKER_PREP is never cleared, it 2388111c225aSTejun Heo * doesn't participate in concurrency management. 2389111c225aSTejun Heo */ 2390111c225aSTejun Heo rescuer->task->flags |= PF_WQ_WORKER; 2391e22bee78STejun Heo repeat: 2392e22bee78STejun Heo set_current_state(TASK_INTERRUPTIBLE); 23931da177e4SLinus Torvalds 2394412d32e6SMike Galbraith if (kthread_should_stop()) { 2395412d32e6SMike Galbraith __set_current_state(TASK_RUNNING); 2396111c225aSTejun Heo rescuer->task->flags &= ~PF_WQ_WORKER; 2397e22bee78STejun Heo return 0; 2398412d32e6SMike Galbraith } 23991da177e4SLinus Torvalds 2400493a1724STejun Heo /* see whether any pwq is asking for help */ 2401493a1724STejun Heo spin_lock_irq(&workqueue_lock); 2402493a1724STejun Heo 2403493a1724STejun Heo while (!list_empty(&wq->maydays)) { 2404493a1724STejun Heo struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2405493a1724STejun Heo struct pool_workqueue, mayday_node); 2406112202d9STejun Heo struct worker_pool *pool = pwq->pool; 2407e22bee78STejun Heo struct work_struct *work, *n; 2408e22bee78STejun Heo 2409e22bee78STejun Heo __set_current_state(TASK_RUNNING); 2410493a1724STejun Heo list_del_init(&pwq->mayday_node); 2411493a1724STejun Heo 2412493a1724STejun Heo spin_unlock_irq(&workqueue_lock); 2413e22bee78STejun Heo 2414e22bee78STejun Heo /* migrate to the target cpu if possible */ 2415f36dc67bSLai Jiangshan worker_maybe_bind_and_lock(pool); 2416b3104104SLai Jiangshan rescuer->pool = pool; 2417e22bee78STejun Heo 2418e22bee78STejun Heo /* 2419e22bee78STejun Heo * Slurp in all works issued via this workqueue and 2420e22bee78STejun Heo * process'em. 2421e22bee78STejun Heo */ 24226183c009STejun Heo WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 2423bd7bdd43STejun Heo list_for_each_entry_safe(work, n, &pool->worklist, entry) 2424112202d9STejun Heo if (get_work_pwq(work) == pwq) 2425e22bee78STejun Heo move_linked_works(work, scheduled, &n); 2426e22bee78STejun Heo 2427e22bee78STejun Heo process_scheduled_works(rescuer); 24287576958aSTejun Heo 24297576958aSTejun Heo /* 2430d565ed63STejun Heo * Leave this pool. If keep_working() is %true, notify a 24317576958aSTejun Heo * regular worker; otherwise, we end up with 0 concurrency 24327576958aSTejun Heo * and stalling the execution. 
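/*
 * Illustrative sketch (hypothetical names): only workqueues allocated
 * with WQ_MEM_RECLAIM get a rescuer like the one above.  A user on the
 * memory-reclaim path would set one up like this so that forward
 * progress is guaranteed under memory pressure:
 */
static struct workqueue_struct *my_reclaim_wq;

static int __init my_reclaim_init(void)
{
	my_reclaim_wq = alloc_workqueue("my_reclaim", WQ_MEM_RECLAIM, 1);
	return my_reclaim_wq ? 0 : -ENOMEM;
}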
24337576958aSTejun Heo */ 243463d95a91STejun Heo if (keep_working(pool)) 243563d95a91STejun Heo wake_up_worker(pool); 24367576958aSTejun Heo 2437b3104104SLai Jiangshan rescuer->pool = NULL; 2438493a1724STejun Heo spin_unlock(&pool->lock); 2439493a1724STejun Heo spin_lock(&workqueue_lock); 24401da177e4SLinus Torvalds } 24411da177e4SLinus Torvalds 2442493a1724STejun Heo spin_unlock_irq(&workqueue_lock); 2443493a1724STejun Heo 2444111c225aSTejun Heo /* rescuers should never participate in concurrency management */ 2445111c225aSTejun Heo WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2446e22bee78STejun Heo schedule(); 2447e22bee78STejun Heo goto repeat; 24481da177e4SLinus Torvalds } 24491da177e4SLinus Torvalds 2450fc2e4d70SOleg Nesterov struct wq_barrier { 2451fc2e4d70SOleg Nesterov struct work_struct work; 2452fc2e4d70SOleg Nesterov struct completion done; 2453fc2e4d70SOleg Nesterov }; 2454fc2e4d70SOleg Nesterov 2455fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work) 2456fc2e4d70SOleg Nesterov { 2457fc2e4d70SOleg Nesterov struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2458fc2e4d70SOleg Nesterov complete(&barr->done); 2459fc2e4d70SOleg Nesterov } 2460fc2e4d70SOleg Nesterov 24614690c4abSTejun Heo /** 24624690c4abSTejun Heo * insert_wq_barrier - insert a barrier work 2463112202d9STejun Heo * @pwq: pwq to insert barrier into 24644690c4abSTejun Heo * @barr: wq_barrier to insert 2465affee4b2STejun Heo * @target: target work to attach @barr to 2466affee4b2STejun Heo * @worker: worker currently executing @target, NULL if @target is not executing 24674690c4abSTejun Heo * 2468affee4b2STejun Heo * @barr is linked to @target such that @barr is completed only after 2469affee4b2STejun Heo * @target finishes execution. Please note that the ordering 2470affee4b2STejun Heo * guarantee is observed only with respect to @target and on the local 2471affee4b2STejun Heo * cpu. 2472affee4b2STejun Heo * 2473affee4b2STejun Heo * Currently, a queued barrier can't be canceled. This is because 2474affee4b2STejun Heo * try_to_grab_pending() can't determine whether the work to be 2475affee4b2STejun Heo * grabbed is at the head of the queue and thus can't clear LINKED 2476affee4b2STejun Heo * flag of the previous work while there must be a valid next work 2477affee4b2STejun Heo * after a work with LINKED flag set. 2478affee4b2STejun Heo * 2479affee4b2STejun Heo * Note that when @worker is non-NULL, @target may be modified 2480112202d9STejun Heo * underneath us, so we can't reliably determine pwq from @target. 24814690c4abSTejun Heo * 24824690c4abSTejun Heo * CONTEXT: 2483d565ed63STejun Heo * spin_lock_irq(pool->lock). 24844690c4abSTejun Heo */ 2485112202d9STejun Heo static void insert_wq_barrier(struct pool_workqueue *pwq, 2486affee4b2STejun Heo struct wq_barrier *barr, 2487affee4b2STejun Heo struct work_struct *target, struct worker *worker) 2488fc2e4d70SOleg Nesterov { 2489affee4b2STejun Heo struct list_head *head; 2490affee4b2STejun Heo unsigned int linked = 0; 2491affee4b2STejun Heo 2492dc186ad7SThomas Gleixner /* 2493d565ed63STejun Heo * debugobject calls are safe here even with pool->lock locked 2494dc186ad7SThomas Gleixner * as we know for sure that this will not trigger any of the 2495dc186ad7SThomas Gleixner * checks and call back into the fixup functions where we 2496dc186ad7SThomas Gleixner * might deadlock. 
2497dc186ad7SThomas Gleixner */ 2498ca1cab37SAndrew Morton INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 249922df02bbSTejun Heo __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 2500fc2e4d70SOleg Nesterov init_completion(&barr->done); 250183c22520SOleg Nesterov 2502affee4b2STejun Heo /* 2503affee4b2STejun Heo * If @target is currently being executed, schedule the 2504affee4b2STejun Heo * barrier to the worker; otherwise, put it after @target. 2505affee4b2STejun Heo */ 2506affee4b2STejun Heo if (worker) 2507affee4b2STejun Heo head = worker->scheduled.next; 2508affee4b2STejun Heo else { 2509affee4b2STejun Heo unsigned long *bits = work_data_bits(target); 2510affee4b2STejun Heo 2511affee4b2STejun Heo head = target->entry.next; 2512affee4b2STejun Heo /* there can already be other linked works, inherit and set */ 2513affee4b2STejun Heo linked = *bits & WORK_STRUCT_LINKED; 2514affee4b2STejun Heo __set_bit(WORK_STRUCT_LINKED_BIT, bits); 2515affee4b2STejun Heo } 2516affee4b2STejun Heo 2517dc186ad7SThomas Gleixner debug_work_activate(&barr->work); 2518112202d9STejun Heo insert_work(pwq, &barr->work, head, 2519affee4b2STejun Heo work_color_to_flags(WORK_NO_COLOR) | linked); 2520fc2e4d70SOleg Nesterov } 2521fc2e4d70SOleg Nesterov 252273f53c4aSTejun Heo /** 2523112202d9STejun Heo * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 252473f53c4aSTejun Heo * @wq: workqueue being flushed 252573f53c4aSTejun Heo * @flush_color: new flush color, < 0 for no-op 252673f53c4aSTejun Heo * @work_color: new work color, < 0 for no-op 252773f53c4aSTejun Heo * 2528112202d9STejun Heo * Prepare pwqs for workqueue flushing. 252973f53c4aSTejun Heo * 2530112202d9STejun Heo * If @flush_color is non-negative, flush_color on all pwqs should be 2531112202d9STejun Heo * -1. If no pwq has in-flight commands at the specified color, all 2532112202d9STejun Heo * pwq->flush_color's stay at -1 and %false is returned. If any pwq 2533112202d9STejun Heo * has in flight commands, its pwq->flush_color is set to 2534112202d9STejun Heo * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 253573f53c4aSTejun Heo * wakeup logic is armed and %true is returned. 253673f53c4aSTejun Heo * 253773f53c4aSTejun Heo * The caller should have initialized @wq->first_flusher prior to 253873f53c4aSTejun Heo * calling this function with non-negative @flush_color. If 253973f53c4aSTejun Heo * @flush_color is negative, no flush color update is done and %false 254073f53c4aSTejun Heo * is returned. 254173f53c4aSTejun Heo * 2542112202d9STejun Heo * If @work_color is non-negative, all pwqs should have the same 254373f53c4aSTejun Heo * work_color which is previous to @work_color and all will be 254473f53c4aSTejun Heo * advanced to @work_color. 254573f53c4aSTejun Heo * 254673f53c4aSTejun Heo * CONTEXT: 254773f53c4aSTejun Heo * mutex_lock(wq->flush_mutex). 254873f53c4aSTejun Heo * 254973f53c4aSTejun Heo * RETURNS: 255073f53c4aSTejun Heo * %true if @flush_color >= 0 and there's something to flush. %false 255173f53c4aSTejun Heo * otherwise. 
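/*
 * Illustrative sketch of the "+1 bias" protocol the function below
 * relies on: nr_pwqs_to_flush starts at 1 so the completion cannot fire
 * while pwqs are still being inspected, and the bias is dropped only
 * after the scan.  Generic shape of the pattern, with a hypothetical
 * unit_is_busy() helper:
 */
static atomic_t nr_pending;
static struct completion scan_done;

static void scan_units(int nr_units)
{
	int i;

	atomic_set(&nr_pending, 1);		/* the +1 bias */
	for (i = 0; i < nr_units; i++)
		if (unit_is_busy(i))		/* hypothetical */
			atomic_inc(&nr_pending);
	if (atomic_dec_and_test(&nr_pending))	/* drop the bias */
		complete(&scan_done);
}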
255273f53c4aSTejun Heo */ 2553112202d9STejun Heo static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 255473f53c4aSTejun Heo int flush_color, int work_color) 25551da177e4SLinus Torvalds { 255673f53c4aSTejun Heo bool wait = false; 255749e3cf44STejun Heo struct pool_workqueue *pwq; 25581da177e4SLinus Torvalds 255973f53c4aSTejun Heo if (flush_color >= 0) { 25606183c009STejun Heo WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 2561112202d9STejun Heo atomic_set(&wq->nr_pwqs_to_flush, 1); 2562dc186ad7SThomas Gleixner } 256314441960SOleg Nesterov 256476af4d93STejun Heo local_irq_disable(); 256576af4d93STejun Heo 256649e3cf44STejun Heo for_each_pwq(pwq, wq) { 2567112202d9STejun Heo struct worker_pool *pool = pwq->pool; 25681da177e4SLinus Torvalds 256976af4d93STejun Heo spin_lock(&pool->lock); 257073f53c4aSTejun Heo 257173f53c4aSTejun Heo if (flush_color >= 0) { 25726183c009STejun Heo WARN_ON_ONCE(pwq->flush_color != -1); 257373f53c4aSTejun Heo 2574112202d9STejun Heo if (pwq->nr_in_flight[flush_color]) { 2575112202d9STejun Heo pwq->flush_color = flush_color; 2576112202d9STejun Heo atomic_inc(&wq->nr_pwqs_to_flush); 257773f53c4aSTejun Heo wait = true; 25781da177e4SLinus Torvalds } 257973f53c4aSTejun Heo } 258073f53c4aSTejun Heo 258173f53c4aSTejun Heo if (work_color >= 0) { 25826183c009STejun Heo WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 2583112202d9STejun Heo pwq->work_color = work_color; 258473f53c4aSTejun Heo } 258573f53c4aSTejun Heo 258676af4d93STejun Heo spin_unlock(&pool->lock); 25871da177e4SLinus Torvalds } 25881da177e4SLinus Torvalds 258976af4d93STejun Heo local_irq_enable(); 259076af4d93STejun Heo 2591112202d9STejun Heo if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 259273f53c4aSTejun Heo complete(&wq->first_flusher->done); 259373f53c4aSTejun Heo 259473f53c4aSTejun Heo return wait; 259583c22520SOleg Nesterov } 25961da177e4SLinus Torvalds 25970fcb78c2SRolf Eike Beer /** 25981da177e4SLinus Torvalds * flush_workqueue - ensure that any scheduled work has run to completion. 25990fcb78c2SRolf Eike Beer * @wq: workqueue to flush 26001da177e4SLinus Torvalds * 2601c5aa87bbSTejun Heo * This function sleeps until all work items which were queued on entry 2602c5aa87bbSTejun Heo * have finished execution, but it is not livelocked by new incoming ones. 26031da177e4SLinus Torvalds */ 26047ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq) 26051da177e4SLinus Torvalds { 260673f53c4aSTejun Heo struct wq_flusher this_flusher = { 260773f53c4aSTejun Heo .list = LIST_HEAD_INIT(this_flusher.list), 260873f53c4aSTejun Heo .flush_color = -1, 260973f53c4aSTejun Heo .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), 261073f53c4aSTejun Heo }; 261173f53c4aSTejun Heo int next_color; 2612b1f4ec17SOleg Nesterov 26133295f0efSIngo Molnar lock_map_acquire(&wq->lockdep_map); 26143295f0efSIngo Molnar lock_map_release(&wq->lockdep_map); 261573f53c4aSTejun Heo 261673f53c4aSTejun Heo mutex_lock(&wq->flush_mutex); 261773f53c4aSTejun Heo 261873f53c4aSTejun Heo /* 261973f53c4aSTejun Heo * Start-to-wait phase 262073f53c4aSTejun Heo */ 262173f53c4aSTejun Heo next_color = work_next_color(wq->work_color); 262273f53c4aSTejun Heo 262373f53c4aSTejun Heo if (next_color != wq->flush_color) { 262473f53c4aSTejun Heo /* 262573f53c4aSTejun Heo * Color space is not full. The current work_color 262673f53c4aSTejun Heo * becomes our flush_color and work_color is advanced 262773f53c4aSTejun Heo * by one. 
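/*
 * Illustrative worked example: work_next_color() advances modulo
 * WORK_NR_COLORS (15), i.e. 0, 1, ..., 14, 0, ...  flush_color is the
 * oldest color still being flushed and work_color the next one to be
 * handed out, so roughly fourteen flushes can be in flight before the
 * color space fills up and flushers are diverted to the overflow list
 * further below.
 */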
262873f53c4aSTejun Heo */ 26296183c009STejun Heo WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 263073f53c4aSTejun Heo this_flusher.flush_color = wq->work_color; 263173f53c4aSTejun Heo wq->work_color = next_color; 263273f53c4aSTejun Heo 263373f53c4aSTejun Heo if (!wq->first_flusher) { 263473f53c4aSTejun Heo /* no flush in progress, become the first flusher */ 26356183c009STejun Heo WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 263673f53c4aSTejun Heo 263773f53c4aSTejun Heo wq->first_flusher = &this_flusher; 263873f53c4aSTejun Heo 2639112202d9STejun Heo if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 264073f53c4aSTejun Heo wq->work_color)) { 264173f53c4aSTejun Heo /* nothing to flush, done */ 264273f53c4aSTejun Heo wq->flush_color = next_color; 264373f53c4aSTejun Heo wq->first_flusher = NULL; 264473f53c4aSTejun Heo goto out_unlock; 264573f53c4aSTejun Heo } 264673f53c4aSTejun Heo } else { 264773f53c4aSTejun Heo /* wait in queue */ 26486183c009STejun Heo WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 264973f53c4aSTejun Heo list_add_tail(&this_flusher.list, &wq->flusher_queue); 2650112202d9STejun Heo flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 265173f53c4aSTejun Heo } 265273f53c4aSTejun Heo } else { 265373f53c4aSTejun Heo /* 265473f53c4aSTejun Heo * Oops, color space is full, wait on overflow queue. 265573f53c4aSTejun Heo * The next flush completion will assign us 265673f53c4aSTejun Heo * flush_color and transfer to flusher_queue. 265773f53c4aSTejun Heo */ 265873f53c4aSTejun Heo list_add_tail(&this_flusher.list, &wq->flusher_overflow); 265973f53c4aSTejun Heo } 266073f53c4aSTejun Heo 266173f53c4aSTejun Heo mutex_unlock(&wq->flush_mutex); 266273f53c4aSTejun Heo 266373f53c4aSTejun Heo wait_for_completion(&this_flusher.done); 266473f53c4aSTejun Heo 266573f53c4aSTejun Heo /* 266673f53c4aSTejun Heo * Wake-up-and-cascade phase 266773f53c4aSTejun Heo * 266873f53c4aSTejun Heo * First flushers are responsible for cascading flushes and 266973f53c4aSTejun Heo * handling overflow. Non-first flushers can simply return. 
267073f53c4aSTejun Heo */ 267173f53c4aSTejun Heo if (wq->first_flusher != &this_flusher) 267273f53c4aSTejun Heo return; 267373f53c4aSTejun Heo 267473f53c4aSTejun Heo mutex_lock(&wq->flush_mutex); 267573f53c4aSTejun Heo 26764ce48b37STejun Heo /* we might have raced, check again with mutex held */ 26774ce48b37STejun Heo if (wq->first_flusher != &this_flusher) 26784ce48b37STejun Heo goto out_unlock; 26794ce48b37STejun Heo 268073f53c4aSTejun Heo wq->first_flusher = NULL; 268173f53c4aSTejun Heo 26826183c009STejun Heo WARN_ON_ONCE(!list_empty(&this_flusher.list)); 26836183c009STejun Heo WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 268473f53c4aSTejun Heo 268573f53c4aSTejun Heo while (true) { 268673f53c4aSTejun Heo struct wq_flusher *next, *tmp; 268773f53c4aSTejun Heo 268873f53c4aSTejun Heo /* complete all the flushers sharing the current flush color */ 268973f53c4aSTejun Heo list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 269073f53c4aSTejun Heo if (next->flush_color != wq->flush_color) 269173f53c4aSTejun Heo break; 269273f53c4aSTejun Heo list_del_init(&next->list); 269373f53c4aSTejun Heo complete(&next->done); 269473f53c4aSTejun Heo } 269573f53c4aSTejun Heo 26966183c009STejun Heo WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 269773f53c4aSTejun Heo wq->flush_color != work_next_color(wq->work_color)); 269873f53c4aSTejun Heo 269973f53c4aSTejun Heo /* this flush_color is finished, advance by one */ 270073f53c4aSTejun Heo wq->flush_color = work_next_color(wq->flush_color); 270173f53c4aSTejun Heo 270273f53c4aSTejun Heo /* one color has been freed, handle overflow queue */ 270373f53c4aSTejun Heo if (!list_empty(&wq->flusher_overflow)) { 270473f53c4aSTejun Heo /* 270573f53c4aSTejun Heo * Assign the same color to all overflowed 270673f53c4aSTejun Heo * flushers, advance work_color and append to 270773f53c4aSTejun Heo * flusher_queue. This is the start-to-wait 270873f53c4aSTejun Heo * phase for these overflowed flushers. 270973f53c4aSTejun Heo */ 271073f53c4aSTejun Heo list_for_each_entry(tmp, &wq->flusher_overflow, list) 271173f53c4aSTejun Heo tmp->flush_color = wq->work_color; 271273f53c4aSTejun Heo 271373f53c4aSTejun Heo wq->work_color = work_next_color(wq->work_color); 271473f53c4aSTejun Heo 271573f53c4aSTejun Heo list_splice_tail_init(&wq->flusher_overflow, 271673f53c4aSTejun Heo &wq->flusher_queue); 2717112202d9STejun Heo flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 271873f53c4aSTejun Heo } 271973f53c4aSTejun Heo 272073f53c4aSTejun Heo if (list_empty(&wq->flusher_queue)) { 27216183c009STejun Heo WARN_ON_ONCE(wq->flush_color != wq->work_color); 272273f53c4aSTejun Heo break; 272373f53c4aSTejun Heo } 272473f53c4aSTejun Heo 272573f53c4aSTejun Heo /* 272673f53c4aSTejun Heo * Need to flush more colors. Make the next flusher 2727112202d9STejun Heo * the new first flusher and arm pwqs. 272873f53c4aSTejun Heo */ 27296183c009STejun Heo WARN_ON_ONCE(wq->flush_color == wq->work_color); 27306183c009STejun Heo WARN_ON_ONCE(wq->flush_color != next->flush_color); 273173f53c4aSTejun Heo 273273f53c4aSTejun Heo list_del_init(&next->list); 273373f53c4aSTejun Heo wq->first_flusher = next; 273473f53c4aSTejun Heo 2735112202d9STejun Heo if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 273673f53c4aSTejun Heo break; 273773f53c4aSTejun Heo 273873f53c4aSTejun Heo /* 273973f53c4aSTejun Heo * Meh... this color is already done, clear first 274073f53c4aSTejun Heo * flusher and repeat cascading. 
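/*
 * Illustrative usage sketch for flush_workqueue() (hypothetical
 * helper): queue a few items, then wait for everything queued so far.
 */
static void my_sync_point(struct workqueue_struct *wq,
			  struct work_struct *a, struct work_struct *b)
{
	queue_work(wq, a);
	queue_work(wq, b);
	flush_workqueue(wq);	/* both items have finished here */
}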
274173f53c4aSTejun Heo */ 274273f53c4aSTejun Heo wq->first_flusher = NULL; 274373f53c4aSTejun Heo } 274473f53c4aSTejun Heo 274573f53c4aSTejun Heo out_unlock: 274673f53c4aSTejun Heo mutex_unlock(&wq->flush_mutex); 27471da177e4SLinus Torvalds } 2748ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue); 27491da177e4SLinus Torvalds 27509c5a2ba7STejun Heo /** 27519c5a2ba7STejun Heo * drain_workqueue - drain a workqueue 27529c5a2ba7STejun Heo * @wq: workqueue to drain 27539c5a2ba7STejun Heo * 27549c5a2ba7STejun Heo * Wait until the workqueue becomes empty. While draining is in progress, 27559c5a2ba7STejun Heo * only chain queueing is allowed. IOW, only currently pending or running 27569c5a2ba7STejun Heo * work items on @wq can queue further work items on it. @wq is flushed 27579c5a2ba7STejun Heo * repeatedly until it becomes empty. The number of flushes is determined 27589c5a2ba7STejun Heo * by the depth of chaining and should be relatively short. Whine if it 27599c5a2ba7STejun Heo * takes too long. 27609c5a2ba7STejun Heo */ 27619c5a2ba7STejun Heo void drain_workqueue(struct workqueue_struct *wq) 27629c5a2ba7STejun Heo { 27639c5a2ba7STejun Heo unsigned int flush_cnt = 0; 276449e3cf44STejun Heo struct pool_workqueue *pwq; 27659c5a2ba7STejun Heo 27669c5a2ba7STejun Heo /* 27679c5a2ba7STejun Heo * __queue_work() needs to test whether there are drainers; it is much 27689c5a2ba7STejun Heo * hotter than drain_workqueue() and already looks at @wq->flags. 2769618b01ebSTejun Heo * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 27709c5a2ba7STejun Heo */ 2771e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 27729c5a2ba7STejun Heo if (!wq->nr_drainers++) 2773618b01ebSTejun Heo wq->flags |= __WQ_DRAINING; 2774e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 27759c5a2ba7STejun Heo reflush: 27769c5a2ba7STejun Heo flush_workqueue(wq); 27779c5a2ba7STejun Heo 277876af4d93STejun Heo local_irq_disable(); 277976af4d93STejun Heo 278049e3cf44STejun Heo for_each_pwq(pwq, wq) { 2781fa2563e4SThomas Tuttle bool drained; 27829c5a2ba7STejun Heo 278376af4d93STejun Heo spin_lock(&pwq->pool->lock); 2784112202d9STejun Heo drained = !pwq->nr_active && list_empty(&pwq->delayed_works); 278576af4d93STejun Heo spin_unlock(&pwq->pool->lock); 2786fa2563e4SThomas Tuttle 2787fa2563e4SThomas Tuttle if (drained) 27889c5a2ba7STejun Heo continue; 27899c5a2ba7STejun Heo 27909c5a2ba7STejun Heo if (++flush_cnt == 10 || 27919c5a2ba7STejun Heo (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 2792c5aa87bbSTejun Heo pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n", 27939c5a2ba7STejun Heo wq->name, flush_cnt); 279476af4d93STejun Heo 279576af4d93STejun Heo local_irq_enable(); 27969c5a2ba7STejun Heo goto reflush; 27979c5a2ba7STejun Heo } 27989c5a2ba7STejun Heo 279976af4d93STejun Heo spin_lock(&workqueue_lock); 28009c5a2ba7STejun Heo if (!--wq->nr_drainers) 2801618b01ebSTejun Heo wq->flags &= ~__WQ_DRAINING; 280276af4d93STejun Heo spin_unlock(&workqueue_lock); 280376af4d93STejun Heo 280476af4d93STejun Heo local_irq_enable(); 28059c5a2ba7STejun Heo } 28069c5a2ba7STejun Heo EXPORT_SYMBOL_GPL(drain_workqueue); 28079c5a2ba7STejun Heo 2808606a5020STejun Heo static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 2809baf59022STejun Heo { 2810baf59022STejun Heo struct worker *worker = NULL; 2811c9e7cf27STejun Heo struct worker_pool *pool; 2812112202d9STejun Heo struct pool_workqueue *pwq; 2813baf59022STejun Heo 2814baf59022STejun Heo might_sleep(); 2815baf59022STejun Heo 2816fa1b54e6STejun
Heo local_irq_disable(); 2817fa1b54e6STejun Heo pool = get_work_pool(work); 2818fa1b54e6STejun Heo if (!pool) { 2819fa1b54e6STejun Heo local_irq_enable(); 2820fa1b54e6STejun Heo return false; 2821fa1b54e6STejun Heo } 2822fa1b54e6STejun Heo 2823fa1b54e6STejun Heo spin_lock(&pool->lock); 28240b3dae68SLai Jiangshan /* see the comment in try_to_grab_pending() with the same code */ 2825112202d9STejun Heo pwq = get_work_pwq(work); 2826112202d9STejun Heo if (pwq) { 2827112202d9STejun Heo if (unlikely(pwq->pool != pool)) 2828baf59022STejun Heo goto already_gone; 2829606a5020STejun Heo } else { 2830c9e7cf27STejun Heo worker = find_worker_executing_work(pool, work); 2831baf59022STejun Heo if (!worker) 2832baf59022STejun Heo goto already_gone; 2833112202d9STejun Heo pwq = worker->current_pwq; 2834606a5020STejun Heo } 2835baf59022STejun Heo 2836112202d9STejun Heo insert_wq_barrier(pwq, barr, work, worker); 2837d565ed63STejun Heo spin_unlock_irq(&pool->lock); 2838baf59022STejun Heo 2839e159489bSTejun Heo /* 2840e159489bSTejun Heo * If @max_active is 1 or rescuer is in use, flushing another work 2841e159489bSTejun Heo * item on the same workqueue may lead to deadlock. Make sure the 2842e159489bSTejun Heo * flusher is not running on the same workqueue by verifying write 2843e159489bSTejun Heo * access. 2844e159489bSTejun Heo */ 2845493008a8STejun Heo if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) 2846112202d9STejun Heo lock_map_acquire(&pwq->wq->lockdep_map); 2847e159489bSTejun Heo else 2848112202d9STejun Heo lock_map_acquire_read(&pwq->wq->lockdep_map); 2849112202d9STejun Heo lock_map_release(&pwq->wq->lockdep_map); 2850e159489bSTejun Heo 2851baf59022STejun Heo return true; 2852baf59022STejun Heo already_gone: 2853d565ed63STejun Heo spin_unlock_irq(&pool->lock); 2854baf59022STejun Heo return false; 2855baf59022STejun Heo } 2856baf59022STejun Heo 2857db700897SOleg Nesterov /** 2858401a8d04STejun Heo * flush_work - wait for a work to finish executing the last queueing instance 2859401a8d04STejun Heo * @work: the work to flush 2860db700897SOleg Nesterov * 2861606a5020STejun Heo * Wait until @work has finished execution. @work is guaranteed to be idle 2862606a5020STejun Heo * on return if it hasn't been requeued since flush started. 2863401a8d04STejun Heo * 2864401a8d04STejun Heo * RETURNS: 2865401a8d04STejun Heo * %true if flush_work() waited for the work to finish execution, 2866401a8d04STejun Heo * %false if it was already idle. 
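/*
 * Illustrative sketch of the deadlock start_flush_work() guards against
 * above (hypothetical names): on a max_active == 1 workqueue, flushing
 * a sibling item from inside a work function can never complete, and
 * the lockdep annotations above will flag it.
 */
static struct work_struct sibling_work;

static void my_work_fn(struct work_struct *work)
{
	/* sibling_work is queued behind us on the same single-slot
	 * workqueue and cannot start until we return */
	flush_work(&sibling_work);
}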
2867db700897SOleg Nesterov */ 2868401a8d04STejun Heo bool flush_work(struct work_struct *work) 2869db700897SOleg Nesterov { 2870db700897SOleg Nesterov struct wq_barrier barr; 2871db700897SOleg Nesterov 28720976dfc1SStephen Boyd lock_map_acquire(&work->lockdep_map); 28730976dfc1SStephen Boyd lock_map_release(&work->lockdep_map); 28740976dfc1SStephen Boyd 2875606a5020STejun Heo if (start_flush_work(work, &barr)) { 2876db700897SOleg Nesterov wait_for_completion(&barr.done); 2877dc186ad7SThomas Gleixner destroy_work_on_stack(&barr.work); 2878401a8d04STejun Heo return true; 2879606a5020STejun Heo } else { 2880401a8d04STejun Heo return false; 2881db700897SOleg Nesterov } 2882606a5020STejun Heo } 2883db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work); 2884db700897SOleg Nesterov 288536e227d2STejun Heo static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 2886401a8d04STejun Heo { 2887bbb68dfaSTejun Heo unsigned long flags; 28881f1f642eSOleg Nesterov int ret; 28891f1f642eSOleg Nesterov 28901f1f642eSOleg Nesterov do { 2891bbb68dfaSTejun Heo ret = try_to_grab_pending(work, is_dwork, &flags); 2892bbb68dfaSTejun Heo /* 2893bbb68dfaSTejun Heo * If someone else is canceling, wait for the same event it 2894bbb68dfaSTejun Heo * would be waiting for before retrying. 2895bbb68dfaSTejun Heo */ 2896bbb68dfaSTejun Heo if (unlikely(ret == -ENOENT)) 2897606a5020STejun Heo flush_work(work); 28981f1f642eSOleg Nesterov } while (unlikely(ret < 0)); 28991f1f642eSOleg Nesterov 2900bbb68dfaSTejun Heo /* tell other tasks trying to grab @work to back off */ 2901bbb68dfaSTejun Heo mark_work_canceling(work); 2902bbb68dfaSTejun Heo local_irq_restore(flags); 2903bbb68dfaSTejun Heo 2904606a5020STejun Heo flush_work(work); 29057a22ad75STejun Heo clear_work_data(work); 29061f1f642eSOleg Nesterov return ret; 29071f1f642eSOleg Nesterov } 29081f1f642eSOleg Nesterov 29096e84d644SOleg Nesterov /** 2910401a8d04STejun Heo * cancel_work_sync - cancel a work and wait for it to finish 2911401a8d04STejun Heo * @work: the work to cancel 29126e84d644SOleg Nesterov * 2913401a8d04STejun Heo * Cancel @work and wait for its execution to finish. This function 2914401a8d04STejun Heo * can be used even if the work re-queues itself or migrates to 2915401a8d04STejun Heo * another workqueue. On return from this function, @work is 2916401a8d04STejun Heo * guaranteed to be not pending or executing on any CPU. 29171f1f642eSOleg Nesterov * 2918401a8d04STejun Heo * cancel_work_sync(&delayed_work->work) must not be used for 2919401a8d04STejun Heo * delayed_work's. Use cancel_delayed_work_sync() instead. 29206e84d644SOleg Nesterov * 2921401a8d04STejun Heo * The caller must ensure that the workqueue on which @work was last 29226e84d644SOleg Nesterov * queued can't be destroyed before this function returns. 2923401a8d04STejun Heo * 2924401a8d04STejun Heo * RETURNS: 2925401a8d04STejun Heo * %true if @work was pending, %false otherwise. 
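/*
 * Illustrative teardown idiom for cancel_work_sync() below (hypothetical
 * struct my_dev): once it returns, the handler is neither pending nor
 * running, so freeing the containing object is safe.
 */
struct my_dev {
	struct work_struct reset_work;
};

static void my_dev_destroy(struct my_dev *dev)
{
	cancel_work_sync(&dev->reset_work);
	kfree(dev);
}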
29266e84d644SOleg Nesterov */ 2927401a8d04STejun Heo bool cancel_work_sync(struct work_struct *work) 29286e84d644SOleg Nesterov { 292936e227d2STejun Heo return __cancel_work_timer(work, false); 2930b89deed3SOleg Nesterov } 293128e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync); 2932b89deed3SOleg Nesterov 29336e84d644SOleg Nesterov /** 2934401a8d04STejun Heo * flush_delayed_work - wait for a dwork to finish executing the last queueing 2935401a8d04STejun Heo * @dwork: the delayed work to flush 29366e84d644SOleg Nesterov * 2937401a8d04STejun Heo * Delayed timer is cancelled and the pending work is queued for 2938401a8d04STejun Heo * immediate execution. Like flush_work(), this function only 2939401a8d04STejun Heo * considers the last queueing instance of @dwork. 29401f1f642eSOleg Nesterov * 2941401a8d04STejun Heo * RETURNS: 2942401a8d04STejun Heo * %true if flush_work() waited for the work to finish execution, 2943401a8d04STejun Heo * %false if it was already idle. 29446e84d644SOleg Nesterov */ 2945401a8d04STejun Heo bool flush_delayed_work(struct delayed_work *dwork) 2946401a8d04STejun Heo { 29478930cabaSTejun Heo local_irq_disable(); 2948401a8d04STejun Heo if (del_timer_sync(&dwork->timer)) 294960c057bcSLai Jiangshan __queue_work(dwork->cpu, dwork->wq, &dwork->work); 29508930cabaSTejun Heo local_irq_enable(); 2951401a8d04STejun Heo return flush_work(&dwork->work); 2952401a8d04STejun Heo } 2953401a8d04STejun Heo EXPORT_SYMBOL(flush_delayed_work); 2954401a8d04STejun Heo 2955401a8d04STejun Heo /** 295657b30ae7STejun Heo * cancel_delayed_work - cancel a delayed work 295757b30ae7STejun Heo * @dwork: delayed_work to cancel 295809383498STejun Heo * 295957b30ae7STejun Heo * Kill off a pending delayed_work. Returns %true if @dwork was pending 296057b30ae7STejun Heo * and canceled; %false if it wasn't pending. Note that the work callback 296157b30ae7STejun Heo * function may still be running on return, unless it returns %true and the 296257b30ae7STejun Heo * work doesn't re-arm itself. Explicitly flush or use 296357b30ae7STejun Heo * cancel_delayed_work_sync() to wait on it. 296409383498STejun Heo * 296557b30ae7STejun Heo * This function is safe to call from any context including IRQ handler. 296609383498STejun Heo */ 296757b30ae7STejun Heo bool cancel_delayed_work(struct delayed_work *dwork) 296809383498STejun Heo { 296957b30ae7STejun Heo unsigned long flags; 297057b30ae7STejun Heo int ret; 297157b30ae7STejun Heo 297257b30ae7STejun Heo do { 297357b30ae7STejun Heo ret = try_to_grab_pending(&dwork->work, true, &flags); 297457b30ae7STejun Heo } while (unlikely(ret == -EAGAIN)); 297557b30ae7STejun Heo 297657b30ae7STejun Heo if (unlikely(ret < 0)) 297757b30ae7STejun Heo return false; 297857b30ae7STejun Heo 29797c3eed5cSTejun Heo set_work_pool_and_clear_pending(&dwork->work, 29807c3eed5cSTejun Heo get_work_pool_id(&dwork->work)); 298157b30ae7STejun Heo local_irq_restore(flags); 2982c0158ca6SDan Magenheimer return ret; 298309383498STejun Heo } 298457b30ae7STejun Heo EXPORT_SYMBOL(cancel_delayed_work); 298509383498STejun Heo 298609383498STejun Heo /** 2987401a8d04STejun Heo * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 2988401a8d04STejun Heo * @dwork: the delayed work to cancel 2989401a8d04STejun Heo * 2990401a8d04STejun Heo * This is cancel_work_sync() for delayed works. 2991401a8d04STejun Heo * 2992401a8d04STejun Heo * RETURNS: 2993401a8d04STejun Heo * %true if @dwork was pending, %false otherwise.
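/*
 * Illustrative sketch for the delayed-work APIs above (hypothetical
 * poller): a self-re-arming poll loop is stopped with
 * cancel_delayed_work_sync() below, which also closes the re-arming
 * race that plain cancel_delayed_work() leaves open.
 */
static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... sample state ... */
	queue_delayed_work(system_wq, &poll_dwork, HZ);	/* re-arm */
}

static void poll_stop(void)
{
	cancel_delayed_work_sync(&poll_dwork);	/* timer and work both gone */
}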
2994401a8d04STejun Heo */ 2995401a8d04STejun Heo bool cancel_delayed_work_sync(struct delayed_work *dwork) 29966e84d644SOleg Nesterov { 299736e227d2STejun Heo return __cancel_work_timer(&dwork->work, true); 29986e84d644SOleg Nesterov } 2999f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync); 30001da177e4SLinus Torvalds 30010fcb78c2SRolf Eike Beer /** 300231ddd871STejun Heo * schedule_on_each_cpu - execute a function synchronously on each online CPU 3003b6136773SAndrew Morton * @func: the function to call 3004b6136773SAndrew Morton * 300531ddd871STejun Heo * schedule_on_each_cpu() executes @func on each online CPU using the 300631ddd871STejun Heo * system workqueue and blocks until all CPUs have completed. 3007b6136773SAndrew Morton * schedule_on_each_cpu() is very slow. 300831ddd871STejun Heo * 300931ddd871STejun Heo * RETURNS: 301031ddd871STejun Heo * 0 on success, -errno on failure. 3011b6136773SAndrew Morton */ 301265f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func) 301315316ba8SChristoph Lameter { 301415316ba8SChristoph Lameter int cpu; 301538f51568SNamhyung Kim struct work_struct __percpu *works; 301615316ba8SChristoph Lameter 3017b6136773SAndrew Morton works = alloc_percpu(struct work_struct); 3018b6136773SAndrew Morton if (!works) 301915316ba8SChristoph Lameter return -ENOMEM; 3020b6136773SAndrew Morton 302195402b38SGautham R Shenoy get_online_cpus(); 302293981800STejun Heo 302315316ba8SChristoph Lameter for_each_online_cpu(cpu) { 30249bfb1839SIngo Molnar struct work_struct *work = per_cpu_ptr(works, cpu); 30259bfb1839SIngo Molnar 30269bfb1839SIngo Molnar INIT_WORK(work, func); 30278de6d308SOleg Nesterov schedule_work_on(cpu, work); 302815316ba8SChristoph Lameter } 302993981800STejun Heo 303093981800STejun Heo for_each_online_cpu(cpu) 30318616a89aSOleg Nesterov flush_work(per_cpu_ptr(works, cpu)); 303293981800STejun Heo 303395402b38SGautham R Shenoy put_online_cpus(); 3034b6136773SAndrew Morton free_percpu(works); 303515316ba8SChristoph Lameter return 0; 303615316ba8SChristoph Lameter } 303715316ba8SChristoph Lameter 3038eef6a7d5SAlan Stern /** 3039eef6a7d5SAlan Stern * flush_scheduled_work - ensure that any scheduled work has run to completion. 3040eef6a7d5SAlan Stern * 3041eef6a7d5SAlan Stern * Forces execution of the kernel-global workqueue and blocks until its 3042eef6a7d5SAlan Stern * completion. 3043eef6a7d5SAlan Stern * 3044eef6a7d5SAlan Stern * Think twice before calling this function! It's very easy to get into 3045eef6a7d5SAlan Stern * trouble if you don't take great care. Either of the following situations 3046eef6a7d5SAlan Stern * will lead to deadlock: 3047eef6a7d5SAlan Stern * 3048eef6a7d5SAlan Stern * One of the work items currently on the workqueue needs to acquire 3049eef6a7d5SAlan Stern * a lock held by your code or its caller. 3050eef6a7d5SAlan Stern * 3051eef6a7d5SAlan Stern * Your code is running in the context of a work routine. 3052eef6a7d5SAlan Stern * 3053eef6a7d5SAlan Stern * They will be detected by lockdep when they occur, but the first might not 3054eef6a7d5SAlan Stern * occur very often. It depends on what work items are on the workqueue and 3055eef6a7d5SAlan Stern * what locks they need, which you have no control over. 3056eef6a7d5SAlan Stern * 3057eef6a7d5SAlan Stern * In most situations flushing the entire workqueue is overkill; you merely 3058eef6a7d5SAlan Stern * need to know that a particular work item isn't queued and isn't running. 
3059eef6a7d5SAlan Stern * In such cases you should use cancel_delayed_work_sync() or 3060eef6a7d5SAlan Stern * cancel_work_sync() instead. 3061eef6a7d5SAlan Stern */ 30621da177e4SLinus Torvalds void flush_scheduled_work(void) 30631da177e4SLinus Torvalds { 3064d320c038STejun Heo flush_workqueue(system_wq); 30651da177e4SLinus Torvalds } 3066ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work); 30671da177e4SLinus Torvalds 30681da177e4SLinus Torvalds /** 30691fa44ecaSJames Bottomley * execute_in_process_context - reliably execute the routine with user context 30701fa44ecaSJames Bottomley * @fn: the function to execute 30711fa44ecaSJames Bottomley * @ew: guaranteed storage for the execute work structure (must 30721fa44ecaSJames Bottomley * be available when the work executes) 30731fa44ecaSJames Bottomley * 30741fa44ecaSJames Bottomley * Executes the function immediately if process context is available, 30751fa44ecaSJames Bottomley * otherwise schedules the function for delayed execution. 30761fa44ecaSJames Bottomley * 30771fa44ecaSJames Bottomley * Returns: 0 - function was executed 30781fa44ecaSJames Bottomley * 1 - function was scheduled for execution 30791fa44ecaSJames Bottomley */ 308065f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew) 30811fa44ecaSJames Bottomley { 30821fa44ecaSJames Bottomley if (!in_interrupt()) { 308365f27f38SDavid Howells fn(&ew->work); 30841fa44ecaSJames Bottomley return 0; 30851fa44ecaSJames Bottomley } 30861fa44ecaSJames Bottomley 308765f27f38SDavid Howells INIT_WORK(&ew->work, fn); 30881fa44ecaSJames Bottomley schedule_work(&ew->work); 30891fa44ecaSJames Bottomley 30901fa44ecaSJames Bottomley return 1; 30911fa44ecaSJames Bottomley } 30921fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context); 30931fa44ecaSJames Bottomley 3094226223abSTejun Heo #ifdef CONFIG_SYSFS 3095226223abSTejun Heo /* 3096226223abSTejun Heo * Workqueues with the WQ_SYSFS flag set are visible to userland via 3097226223abSTejun Heo * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 3098226223abSTejun Heo * following attributes. 3099226223abSTejun Heo * 3100226223abSTejun Heo * per_cpu RO bool : whether the workqueue is per-cpu or unbound 3101226223abSTejun Heo * max_active RW int : maximum number of in-flight work items 3102226223abSTejun Heo * 3103226223abSTejun Heo * Unbound workqueues have the following extra attributes.
3104226223abSTejun Heo * 3105226223abSTejun Heo * id RO int : the associated pool ID 3106226223abSTejun Heo * nice RW int : nice value of the workers 3107226223abSTejun Heo * cpumask RW mask : bitmask of allowed CPUs for the workers 3108226223abSTejun Heo */ 3109226223abSTejun Heo struct wq_device { 3110226223abSTejun Heo struct workqueue_struct *wq; 3111226223abSTejun Heo struct device dev; 3112226223abSTejun Heo }; 3113226223abSTejun Heo 3114226223abSTejun Heo static struct workqueue_struct *dev_to_wq(struct device *dev) 3115226223abSTejun Heo { 3116226223abSTejun Heo struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 3117226223abSTejun Heo 3118226223abSTejun Heo return wq_dev->wq; 3119226223abSTejun Heo } 3120226223abSTejun Heo 3121226223abSTejun Heo static ssize_t wq_per_cpu_show(struct device *dev, 3122226223abSTejun Heo struct device_attribute *attr, char *buf) 3123226223abSTejun Heo { 3124226223abSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 3125226223abSTejun Heo 3126226223abSTejun Heo return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 3127226223abSTejun Heo } 3128226223abSTejun Heo 3129226223abSTejun Heo static ssize_t wq_max_active_show(struct device *dev, 3130226223abSTejun Heo struct device_attribute *attr, char *buf) 3131226223abSTejun Heo { 3132226223abSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 3133226223abSTejun Heo 3134226223abSTejun Heo return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 3135226223abSTejun Heo } 3136226223abSTejun Heo 3137226223abSTejun Heo static ssize_t wq_max_active_store(struct device *dev, 3138226223abSTejun Heo struct device_attribute *attr, 3139226223abSTejun Heo const char *buf, size_t count) 3140226223abSTejun Heo { 3141226223abSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 3142226223abSTejun Heo int val; 3143226223abSTejun Heo 3144226223abSTejun Heo if (sscanf(buf, "%d", &val) != 1 || val <= 0) 3145226223abSTejun Heo return -EINVAL; 3146226223abSTejun Heo 3147226223abSTejun Heo workqueue_set_max_active(wq, val); 3148226223abSTejun Heo return count; 3149226223abSTejun Heo } 3150226223abSTejun Heo 3151226223abSTejun Heo static struct device_attribute wq_sysfs_attrs[] = { 3152226223abSTejun Heo __ATTR(per_cpu, 0444, wq_per_cpu_show, NULL), 3153226223abSTejun Heo __ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store), 3154226223abSTejun Heo __ATTR_NULL, 3155226223abSTejun Heo }; 3156226223abSTejun Heo 3157226223abSTejun Heo static ssize_t wq_pool_id_show(struct device *dev, 3158226223abSTejun Heo struct device_attribute *attr, char *buf) 3159226223abSTejun Heo { 3160226223abSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 3161226223abSTejun Heo struct worker_pool *pool; 3162226223abSTejun Heo int written; 3163226223abSTejun Heo 3164226223abSTejun Heo rcu_read_lock_sched(); 3165226223abSTejun Heo pool = first_pwq(wq)->pool; 3166226223abSTejun Heo written = scnprintf(buf, PAGE_SIZE, "%d\n", pool->id); 3167226223abSTejun Heo rcu_read_unlock_sched(); 3168226223abSTejun Heo 3169226223abSTejun Heo return written; 3170226223abSTejun Heo } 3171226223abSTejun Heo 3172226223abSTejun Heo static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 3173226223abSTejun Heo char *buf) 3174226223abSTejun Heo { 3175226223abSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 3176226223abSTejun Heo int written; 3177226223abSTejun Heo 3178226223abSTejun Heo rcu_read_lock_sched(); 3179226223abSTejun Heo written = scnprintf(buf, PAGE_SIZE, 
"%d\n", 3180226223abSTejun Heo first_pwq(wq)->pool->attrs->nice); 3181226223abSTejun Heo rcu_read_unlock_sched(); 3182226223abSTejun Heo 3183226223abSTejun Heo return written; 3184226223abSTejun Heo } 3185226223abSTejun Heo 3186226223abSTejun Heo /* prepare workqueue_attrs for sysfs store operations */ 3187226223abSTejun Heo static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 3188226223abSTejun Heo { 3189226223abSTejun Heo struct workqueue_attrs *attrs; 3190226223abSTejun Heo 3191226223abSTejun Heo attrs = alloc_workqueue_attrs(GFP_KERNEL); 3192226223abSTejun Heo if (!attrs) 3193226223abSTejun Heo return NULL; 3194226223abSTejun Heo 3195226223abSTejun Heo rcu_read_lock_sched(); 3196226223abSTejun Heo copy_workqueue_attrs(attrs, first_pwq(wq)->pool->attrs); 3197226223abSTejun Heo rcu_read_unlock_sched(); 3198226223abSTejun Heo return attrs; 3199226223abSTejun Heo } 3200226223abSTejun Heo 3201226223abSTejun Heo static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 3202226223abSTejun Heo const char *buf, size_t count) 3203226223abSTejun Heo { 3204226223abSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 3205226223abSTejun Heo struct workqueue_attrs *attrs; 3206226223abSTejun Heo int ret; 3207226223abSTejun Heo 3208226223abSTejun Heo attrs = wq_sysfs_prep_attrs(wq); 3209226223abSTejun Heo if (!attrs) 3210226223abSTejun Heo return -ENOMEM; 3211226223abSTejun Heo 3212226223abSTejun Heo if (sscanf(buf, "%d", &attrs->nice) == 1 && 3213226223abSTejun Heo attrs->nice >= -20 && attrs->nice <= 19) 3214226223abSTejun Heo ret = apply_workqueue_attrs(wq, attrs); 3215226223abSTejun Heo else 3216226223abSTejun Heo ret = -EINVAL; 3217226223abSTejun Heo 3218226223abSTejun Heo free_workqueue_attrs(attrs); 3219226223abSTejun Heo return ret ?: count; 3220226223abSTejun Heo } 3221226223abSTejun Heo 3222226223abSTejun Heo static ssize_t wq_cpumask_show(struct device *dev, 3223226223abSTejun Heo struct device_attribute *attr, char *buf) 3224226223abSTejun Heo { 3225226223abSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 3226226223abSTejun Heo int written; 3227226223abSTejun Heo 3228226223abSTejun Heo rcu_read_lock_sched(); 3229226223abSTejun Heo written = cpumask_scnprintf(buf, PAGE_SIZE, 3230226223abSTejun Heo first_pwq(wq)->pool->attrs->cpumask); 3231226223abSTejun Heo rcu_read_unlock_sched(); 3232226223abSTejun Heo 3233226223abSTejun Heo written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); 3234226223abSTejun Heo return written; 3235226223abSTejun Heo } 3236226223abSTejun Heo 3237226223abSTejun Heo static ssize_t wq_cpumask_store(struct device *dev, 3238226223abSTejun Heo struct device_attribute *attr, 3239226223abSTejun Heo const char *buf, size_t count) 3240226223abSTejun Heo { 3241226223abSTejun Heo struct workqueue_struct *wq = dev_to_wq(dev); 3242226223abSTejun Heo struct workqueue_attrs *attrs; 3243226223abSTejun Heo int ret; 3244226223abSTejun Heo 3245226223abSTejun Heo attrs = wq_sysfs_prep_attrs(wq); 3246226223abSTejun Heo if (!attrs) 3247226223abSTejun Heo return -ENOMEM; 3248226223abSTejun Heo 3249226223abSTejun Heo ret = cpumask_parse(buf, attrs->cpumask); 3250226223abSTejun Heo if (!ret) 3251226223abSTejun Heo ret = apply_workqueue_attrs(wq, attrs); 3252226223abSTejun Heo 3253226223abSTejun Heo free_workqueue_attrs(attrs); 3254226223abSTejun Heo return ret ?: count; 3255226223abSTejun Heo } 3256226223abSTejun Heo 3257226223abSTejun Heo static struct device_attribute wq_sysfs_unbound_attrs[] = { 3258226223abSTejun Heo 
__ATTR(pool_id, 0444, wq_pool_id_show, NULL), 3259226223abSTejun Heo __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 3260226223abSTejun Heo __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 3261226223abSTejun Heo __ATTR_NULL, 3262226223abSTejun Heo }; 3263226223abSTejun Heo 3264226223abSTejun Heo static struct bus_type wq_subsys = { 3265226223abSTejun Heo .name = "workqueue", 3266226223abSTejun Heo .dev_attrs = wq_sysfs_attrs, 3267226223abSTejun Heo }; 3268226223abSTejun Heo 3269226223abSTejun Heo static int __init wq_sysfs_init(void) 3270226223abSTejun Heo { 3271226223abSTejun Heo return subsys_virtual_register(&wq_subsys, NULL); 3272226223abSTejun Heo } 3273226223abSTejun Heo core_initcall(wq_sysfs_init); 3274226223abSTejun Heo 3275226223abSTejun Heo static void wq_device_release(struct device *dev) 3276226223abSTejun Heo { 3277226223abSTejun Heo struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 3278226223abSTejun Heo 3279226223abSTejun Heo kfree(wq_dev); 3280226223abSTejun Heo } 3281226223abSTejun Heo 3282226223abSTejun Heo /** 3283226223abSTejun Heo * workqueue_sysfs_register - make a workqueue visible in sysfs 3284226223abSTejun Heo * @wq: the workqueue to register 3285226223abSTejun Heo * 3286226223abSTejun Heo * Expose @wq in sysfs under /sys/bus/workqueue/devices. 3287226223abSTejun Heo * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 3288226223abSTejun Heo * which is the preferred method. 3289226223abSTejun Heo * 3290226223abSTejun Heo * A workqueue user should use this function directly iff it wants to apply 3291226223abSTejun Heo * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 3292226223abSTejun Heo * apply_workqueue_attrs() may race against userland updating the 3293226223abSTejun Heo * attributes. 3294226223abSTejun Heo * 3295226223abSTejun Heo * Returns 0 on success, -errno on failure. 3296226223abSTejun Heo */ 3297226223abSTejun Heo int workqueue_sysfs_register(struct workqueue_struct *wq) 3298226223abSTejun Heo { 3299226223abSTejun Heo struct wq_device *wq_dev; 3300226223abSTejun Heo int ret; 3301226223abSTejun Heo 3302226223abSTejun Heo /* 3303226223abSTejun Heo * Adjusting max_active or creating new pwqs by applying 3304226223abSTejun Heo * attributes breaks the ordering guarantee. Disallow exposing ordered 3305226223abSTejun Heo * workqueues. 3306226223abSTejun Heo */ 3307226223abSTejun Heo if (WARN_ON(wq->flags & __WQ_ORDERED)) 3308226223abSTejun Heo return -EINVAL; 3309226223abSTejun Heo 3310226223abSTejun Heo wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 3311226223abSTejun Heo if (!wq_dev) 3312226223abSTejun Heo return -ENOMEM; 3313226223abSTejun Heo 3314226223abSTejun Heo wq_dev->wq = wq; 3315226223abSTejun Heo wq_dev->dev.bus = &wq_subsys; 3316226223abSTejun Heo wq_dev->dev.init_name = wq->name; 3317226223abSTejun Heo wq_dev->dev.release = wq_device_release; 3318226223abSTejun Heo 3319226223abSTejun Heo /* 3320226223abSTejun Heo * unbound_attrs are created separately. Suppress uevent until 3321226223abSTejun Heo * everything is ready.
3322226223abSTejun Heo */ 3323226223abSTejun Heo dev_set_uevent_suppress(&wq_dev->dev, true); 3324226223abSTejun Heo 3325226223abSTejun Heo ret = device_register(&wq_dev->dev); 3326226223abSTejun Heo if (ret) { 3327226223abSTejun Heo kfree(wq_dev); 3328226223abSTejun Heo wq->wq_dev = NULL; 3329226223abSTejun Heo return ret; 3330226223abSTejun Heo } 3331226223abSTejun Heo 3332226223abSTejun Heo if (wq->flags & WQ_UNBOUND) { 3333226223abSTejun Heo struct device_attribute *attr; 3334226223abSTejun Heo 3335226223abSTejun Heo for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 3336226223abSTejun Heo ret = device_create_file(&wq_dev->dev, attr); 3337226223abSTejun Heo if (ret) { 3338226223abSTejun Heo device_unregister(&wq_dev->dev); 3339226223abSTejun Heo wq->wq_dev = NULL; 3340226223abSTejun Heo return ret; 3341226223abSTejun Heo } 3342226223abSTejun Heo } 3343226223abSTejun Heo } 3344226223abSTejun Heo 3345226223abSTejun Heo kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 3346226223abSTejun Heo return 0; 3347226223abSTejun Heo } 3348226223abSTejun Heo 3349226223abSTejun Heo /** 3350226223abSTejun Heo * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 3351226223abSTejun Heo * @wq: the workqueue to unregister 3352226223abSTejun Heo * 3353226223abSTejun Heo * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 3354226223abSTejun Heo */ 3355226223abSTejun Heo static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 3356226223abSTejun Heo { 3357226223abSTejun Heo struct wq_device *wq_dev = wq->wq_dev; 3358226223abSTejun Heo 3359226223abSTejun Heo if (!wq->wq_dev) 3360226223abSTejun Heo return; 3361226223abSTejun Heo 3362226223abSTejun Heo wq->wq_dev = NULL; 3363226223abSTejun Heo device_unregister(&wq_dev->dev); 3364226223abSTejun Heo } 3365226223abSTejun Heo #else /* CONFIG_SYSFS */ 3366226223abSTejun Heo static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 3367226223abSTejun Heo #endif /* CONFIG_SYSFS */ 3368226223abSTejun Heo 33697a4e344cSTejun Heo /** 33707a4e344cSTejun Heo * free_workqueue_attrs - free a workqueue_attrs 33717a4e344cSTejun Heo * @attrs: workqueue_attrs to free 33727a4e344cSTejun Heo * 33737a4e344cSTejun Heo * Undo alloc_workqueue_attrs(). 33747a4e344cSTejun Heo */ 33757a4e344cSTejun Heo void free_workqueue_attrs(struct workqueue_attrs *attrs) 33767a4e344cSTejun Heo { 33777a4e344cSTejun Heo if (attrs) { 33787a4e344cSTejun Heo free_cpumask_var(attrs->cpumask); 33797a4e344cSTejun Heo kfree(attrs); 33807a4e344cSTejun Heo } 33817a4e344cSTejun Heo } 33827a4e344cSTejun Heo 33837a4e344cSTejun Heo /** 33847a4e344cSTejun Heo * alloc_workqueue_attrs - allocate a workqueue_attrs 33857a4e344cSTejun Heo * @gfp_mask: allocation mask to use 33867a4e344cSTejun Heo * 33877a4e344cSTejun Heo * Allocate a new workqueue_attrs, initialize with default settings and 33887a4e344cSTejun Heo * return it. Returns NULL on failure. 
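/*
 * Illustrative usage sketch for the attrs API (hypothetical helper;
 * apply_workqueue_attrs() is the function this file's sysfs store
 * paths use above): retune an unbound workqueue so its workers run on
 * CPU 0 at nice -5.
 */
static int my_retune(struct workqueue_struct *unbound_wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;
	cpumask_clear(attrs->cpumask);
	cpumask_set_cpu(0, attrs->cpumask);
	ret = apply_workqueue_attrs(unbound_wq, attrs);

	free_workqueue_attrs(attrs);
	return ret;
}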
33897a4e344cSTejun Heo */ 33907a4e344cSTejun Heo struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) 33917a4e344cSTejun Heo { 33927a4e344cSTejun Heo struct workqueue_attrs *attrs; 33937a4e344cSTejun Heo 33947a4e344cSTejun Heo attrs = kzalloc(sizeof(*attrs), gfp_mask); 33957a4e344cSTejun Heo if (!attrs) 33967a4e344cSTejun Heo goto fail; 33977a4e344cSTejun Heo if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) 33987a4e344cSTejun Heo goto fail; 33997a4e344cSTejun Heo 34007a4e344cSTejun Heo cpumask_setall(attrs->cpumask); 34017a4e344cSTejun Heo return attrs; 34027a4e344cSTejun Heo fail: 34037a4e344cSTejun Heo free_workqueue_attrs(attrs); 34047a4e344cSTejun Heo return NULL; 34057a4e344cSTejun Heo } 34067a4e344cSTejun Heo 340729c91e99STejun Heo static void copy_workqueue_attrs(struct workqueue_attrs *to, 340829c91e99STejun Heo const struct workqueue_attrs *from) 340929c91e99STejun Heo { 341029c91e99STejun Heo to->nice = from->nice; 341129c91e99STejun Heo cpumask_copy(to->cpumask, from->cpumask); 341229c91e99STejun Heo } 341329c91e99STejun Heo 341429c91e99STejun Heo /* 341529c91e99STejun Heo * Hacky implementation of jhash of bitmaps which only considers the 341629c91e99STejun Heo * specified number of bits. We probably want a proper implementation in 341729c91e99STejun Heo * include/linux/jhash.h. 341829c91e99STejun Heo */ 341929c91e99STejun Heo static u32 jhash_bitmap(const unsigned long *bitmap, int bits, u32 hash) 342029c91e99STejun Heo { 342129c91e99STejun Heo int nr_longs = bits / BITS_PER_LONG; 342229c91e99STejun Heo int nr_leftover = bits % BITS_PER_LONG; 342329c91e99STejun Heo unsigned long leftover = 0; 342429c91e99STejun Heo 342529c91e99STejun Heo if (nr_longs) 342629c91e99STejun Heo hash = jhash(bitmap, nr_longs * sizeof(long), hash); 342729c91e99STejun Heo if (nr_leftover) { 342829c91e99STejun Heo bitmap_copy(&leftover, bitmap + nr_longs, nr_leftover); 342929c91e99STejun Heo hash = jhash(&leftover, sizeof(long), hash); 343029c91e99STejun Heo } 343129c91e99STejun Heo return hash; 343229c91e99STejun Heo } 343329c91e99STejun Heo 343429c91e99STejun Heo /* hash value of the content of @attrs */ 343529c91e99STejun Heo static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 343629c91e99STejun Heo { 343729c91e99STejun Heo u32 hash = 0; 343829c91e99STejun Heo 343929c91e99STejun Heo hash = jhash_1word(attrs->nice, hash); 344029c91e99STejun Heo hash = jhash_bitmap(cpumask_bits(attrs->cpumask), nr_cpu_ids, hash); 344129c91e99STejun Heo return hash; 344229c91e99STejun Heo } 344329c91e99STejun Heo 344429c91e99STejun Heo /* content equality test */ 344529c91e99STejun Heo static bool wqattrs_equal(const struct workqueue_attrs *a, 344629c91e99STejun Heo const struct workqueue_attrs *b) 344729c91e99STejun Heo { 344829c91e99STejun Heo if (a->nice != b->nice) 344929c91e99STejun Heo return false; 345029c91e99STejun Heo if (!cpumask_equal(a->cpumask, b->cpumask)) 345129c91e99STejun Heo return false; 345229c91e99STejun Heo return true; 345329c91e99STejun Heo } 345429c91e99STejun Heo 34557a4e344cSTejun Heo /** 34567a4e344cSTejun Heo * init_worker_pool - initialize a newly zalloc'd worker_pool 34577a4e344cSTejun Heo * @pool: worker_pool to initialize 34587a4e344cSTejun Heo * 34597a4e344cSTejun Heo * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 346029c91e99STejun Heo * Returns 0 on success, -errno on failure.
Even on failure, all fields 346129c91e99STejun Heo * inside @pool proper are initialized and put_unbound_pool() can be called 346229c91e99STejun Heo * on @pool safely to release it. 34637a4e344cSTejun Heo */ 34647a4e344cSTejun Heo static int init_worker_pool(struct worker_pool *pool) 34654e1a1f9aSTejun Heo { 34664e1a1f9aSTejun Heo spin_lock_init(&pool->lock); 346729c91e99STejun Heo pool->id = -1; 346829c91e99STejun Heo pool->cpu = -1; 34694e1a1f9aSTejun Heo pool->flags |= POOL_DISASSOCIATED; 34704e1a1f9aSTejun Heo INIT_LIST_HEAD(&pool->worklist); 34714e1a1f9aSTejun Heo INIT_LIST_HEAD(&pool->idle_list); 34724e1a1f9aSTejun Heo hash_init(pool->busy_hash); 34734e1a1f9aSTejun Heo 34744e1a1f9aSTejun Heo init_timer_deferrable(&pool->idle_timer); 34754e1a1f9aSTejun Heo pool->idle_timer.function = idle_worker_timeout; 34764e1a1f9aSTejun Heo pool->idle_timer.data = (unsigned long)pool; 34774e1a1f9aSTejun Heo 34784e1a1f9aSTejun Heo setup_timer(&pool->mayday_timer, pool_mayday_timeout, 34794e1a1f9aSTejun Heo (unsigned long)pool); 34804e1a1f9aSTejun Heo 34814e1a1f9aSTejun Heo mutex_init(&pool->manager_arb); 3482bc3a1afcSTejun Heo mutex_init(&pool->manager_mutex); 34834e1a1f9aSTejun Heo ida_init(&pool->worker_ida); 34847a4e344cSTejun Heo 348529c91e99STejun Heo INIT_HLIST_NODE(&pool->hash_node); 348629c91e99STejun Heo pool->refcnt = 1; 348729c91e99STejun Heo 348829c91e99STejun Heo /* shouldn't fail above this point */ 34897a4e344cSTejun Heo pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); 34907a4e344cSTejun Heo if (!pool->attrs) 34917a4e344cSTejun Heo return -ENOMEM; 34927a4e344cSTejun Heo return 0; 34934e1a1f9aSTejun Heo } 34944e1a1f9aSTejun Heo 349529c91e99STejun Heo static void rcu_free_pool(struct rcu_head *rcu) 349629c91e99STejun Heo { 349729c91e99STejun Heo struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 349829c91e99STejun Heo 349929c91e99STejun Heo ida_destroy(&pool->worker_ida); 350029c91e99STejun Heo free_workqueue_attrs(pool->attrs); 350129c91e99STejun Heo kfree(pool); 350229c91e99STejun Heo } 350329c91e99STejun Heo 350429c91e99STejun Heo /** 350529c91e99STejun Heo * put_unbound_pool - put a worker_pool 350629c91e99STejun Heo * @pool: worker_pool to put 350729c91e99STejun Heo * 350829c91e99STejun Heo * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU 3509c5aa87bbSTejun Heo * safe manner. get_unbound_pool() calls this function on its failure path 3510c5aa87bbSTejun Heo * and this function should be able to release pools which went through, 3511c5aa87bbSTejun Heo * successfully or not, init_worker_pool(). 
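 *
 * A sketch of the usual pairing with get_unbound_pool() (illustrative
 * only):
 *
 *	pool = get_unbound_pool(attrs);
 *	if (!pool)
 *		return -ENOMEM;
 *	... use @pool ...
 *	put_unbound_pool(pool);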
351229c91e99STejun Heo */ 351329c91e99STejun Heo static void put_unbound_pool(struct worker_pool *pool) 351429c91e99STejun Heo { 351529c91e99STejun Heo struct worker *worker; 351629c91e99STejun Heo 351729c91e99STejun Heo spin_lock_irq(&workqueue_lock); 351829c91e99STejun Heo if (--pool->refcnt) { 351929c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 352029c91e99STejun Heo return; 352129c91e99STejun Heo } 352229c91e99STejun Heo 352329c91e99STejun Heo /* sanity checks */ 352429c91e99STejun Heo if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) || 352529c91e99STejun Heo WARN_ON(!list_empty(&pool->worklist))) { 352629c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 352729c91e99STejun Heo return; 352829c91e99STejun Heo } 352929c91e99STejun Heo 353029c91e99STejun Heo /* release id and unhash */ 353129c91e99STejun Heo if (pool->id >= 0) 353229c91e99STejun Heo idr_remove(&worker_pool_idr, pool->id); 353329c91e99STejun Heo hash_del(&pool->hash_node); 353429c91e99STejun Heo 353529c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 353629c91e99STejun Heo 3537c5aa87bbSTejun Heo /* 3538c5aa87bbSTejun Heo * Become the manager and destroy all workers. Grabbing 3539c5aa87bbSTejun Heo * manager_arb prevents @pool's workers from blocking on 3540c5aa87bbSTejun Heo * manager_mutex. 3541c5aa87bbSTejun Heo */ 354229c91e99STejun Heo mutex_lock(&pool->manager_arb); 3543cd549687STejun Heo mutex_lock(&pool->manager_mutex); 354429c91e99STejun Heo spin_lock_irq(&pool->lock); 354529c91e99STejun Heo 354629c91e99STejun Heo while ((worker = first_worker(pool))) 354729c91e99STejun Heo destroy_worker(worker); 354829c91e99STejun Heo WARN_ON(pool->nr_workers || pool->nr_idle); 354929c91e99STejun Heo 355029c91e99STejun Heo spin_unlock_irq(&pool->lock); 3551cd549687STejun Heo mutex_unlock(&pool->manager_mutex); 355229c91e99STejun Heo mutex_unlock(&pool->manager_arb); 355329c91e99STejun Heo 355429c91e99STejun Heo /* shut down the timers */ 355529c91e99STejun Heo del_timer_sync(&pool->idle_timer); 355629c91e99STejun Heo del_timer_sync(&pool->mayday_timer); 355729c91e99STejun Heo 355829c91e99STejun Heo /* sched-RCU protected to allow dereferences from get_work_pool() */ 355929c91e99STejun Heo call_rcu_sched(&pool->rcu, rcu_free_pool); 356029c91e99STejun Heo } 356129c91e99STejun Heo 356229c91e99STejun Heo /** 356329c91e99STejun Heo * get_unbound_pool - get a worker_pool with the specified attributes 356429c91e99STejun Heo * @attrs: the attributes of the worker_pool to get 356529c91e99STejun Heo * 356629c91e99STejun Heo * Obtain a worker_pool which has the same attributes as @attrs, bump the 356729c91e99STejun Heo * reference count and return it. If there already is a matching 356829c91e99STejun Heo * worker_pool, it will be used; otherwise, this function attempts to 356929c91e99STejun Heo * create a new one. On failure, returns NULL. 357029c91e99STejun Heo */ 357129c91e99STejun Heo static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 357229c91e99STejun Heo { 357329c91e99STejun Heo static DEFINE_MUTEX(create_mutex); 357429c91e99STejun Heo u32 hash = wqattrs_hash(attrs); 357529c91e99STejun Heo struct worker_pool *pool; 357629c91e99STejun Heo 357729c91e99STejun Heo mutex_lock(&create_mutex); 357829c91e99STejun Heo 357929c91e99STejun Heo /* do we already have a matching pool? 
*/ 358029c91e99STejun Heo spin_lock_irq(&workqueue_lock); 358129c91e99STejun Heo hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 358229c91e99STejun Heo if (wqattrs_equal(pool->attrs, attrs)) { 358329c91e99STejun Heo pool->refcnt++; 358429c91e99STejun Heo goto out_unlock; 358529c91e99STejun Heo } 358629c91e99STejun Heo } 358729c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 358829c91e99STejun Heo 358929c91e99STejun Heo /* nope, create a new one */ 359029c91e99STejun Heo pool = kzalloc(sizeof(*pool), GFP_KERNEL); 359129c91e99STejun Heo if (!pool || init_worker_pool(pool) < 0) 359229c91e99STejun Heo goto fail; 359329c91e99STejun Heo 35948864b4e5STejun Heo lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ 359529c91e99STejun Heo copy_workqueue_attrs(pool->attrs, attrs); 359629c91e99STejun Heo 359729c91e99STejun Heo if (worker_pool_assign_id(pool) < 0) 359829c91e99STejun Heo goto fail; 359929c91e99STejun Heo 360029c91e99STejun Heo /* create and start the initial worker */ 3601ebf44d16STejun Heo if (create_and_start_worker(pool) < 0) 360229c91e99STejun Heo goto fail; 360329c91e99STejun Heo 360429c91e99STejun Heo /* install */ 360529c91e99STejun Heo spin_lock_irq(&workqueue_lock); 360629c91e99STejun Heo hash_add(unbound_pool_hash, &pool->hash_node, hash); 360729c91e99STejun Heo out_unlock: 360829c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 360929c91e99STejun Heo mutex_unlock(&create_mutex); 361029c91e99STejun Heo return pool; 361129c91e99STejun Heo fail: 361229c91e99STejun Heo mutex_unlock(&create_mutex); 361329c91e99STejun Heo if (pool) 361429c91e99STejun Heo put_unbound_pool(pool); 361529c91e99STejun Heo return NULL; 361629c91e99STejun Heo } 361729c91e99STejun Heo 36188864b4e5STejun Heo static void rcu_free_pwq(struct rcu_head *rcu) 36198864b4e5STejun Heo { 36208864b4e5STejun Heo kmem_cache_free(pwq_cache, 36218864b4e5STejun Heo container_of(rcu, struct pool_workqueue, rcu)); 36228864b4e5STejun Heo } 36238864b4e5STejun Heo 36248864b4e5STejun Heo /* 36258864b4e5STejun Heo * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt 36268864b4e5STejun Heo * and needs to be destroyed. 36278864b4e5STejun Heo */ 36288864b4e5STejun Heo static void pwq_unbound_release_workfn(struct work_struct *work) 36298864b4e5STejun Heo { 36308864b4e5STejun Heo struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 36318864b4e5STejun Heo unbound_release_work); 36328864b4e5STejun Heo struct workqueue_struct *wq = pwq->wq; 36338864b4e5STejun Heo struct worker_pool *pool = pwq->pool; 36348864b4e5STejun Heo 36358864b4e5STejun Heo if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) 36368864b4e5STejun Heo return; 36378864b4e5STejun Heo 363875ccf595STejun Heo /* 363975ccf595STejun Heo * Unlink @pwq. Synchronization against flush_mutex isn't strictly 364075ccf595STejun Heo * necessary on release but do it anyway. It's easier to verify 364175ccf595STejun Heo * and consistent with the linking path. 
364275ccf595STejun Heo */ 364375ccf595STejun Heo mutex_lock(&wq->flush_mutex); 36448864b4e5STejun Heo spin_lock_irq(&workqueue_lock); 36458864b4e5STejun Heo list_del_rcu(&pwq->pwqs_node); 36468864b4e5STejun Heo spin_unlock_irq(&workqueue_lock); 364775ccf595STejun Heo mutex_unlock(&wq->flush_mutex); 36488864b4e5STejun Heo 36498864b4e5STejun Heo put_unbound_pool(pool); 36508864b4e5STejun Heo call_rcu_sched(&pwq->rcu, rcu_free_pwq); 36518864b4e5STejun Heo 36528864b4e5STejun Heo /* 36538864b4e5STejun Heo * If we're the last pwq going away, @wq is already dead and no one 36548864b4e5STejun Heo * is gonna access it anymore. Free it. 36558864b4e5STejun Heo */ 36568864b4e5STejun Heo if (list_empty(&wq->pwqs)) 36578864b4e5STejun Heo kfree(wq); 36588864b4e5STejun Heo } 36598864b4e5STejun Heo 36600fbd95aaSTejun Heo /** 3661699ce097STejun Heo * pwq_adjust_max_active - update a pwq's max_active to the current setting 36620fbd95aaSTejun Heo * @pwq: target pool_workqueue 36630fbd95aaSTejun Heo * 3664699ce097STejun Heo * If @pwq isn't freezing, set @pwq->max_active to the associated 3665699ce097STejun Heo * workqueue's saved_max_active and activate delayed work items 3666699ce097STejun Heo * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. 36670fbd95aaSTejun Heo */ 3668699ce097STejun Heo static void pwq_adjust_max_active(struct pool_workqueue *pwq) 36690fbd95aaSTejun Heo { 3670699ce097STejun Heo struct workqueue_struct *wq = pwq->wq; 3671699ce097STejun Heo bool freezable = wq->flags & WQ_FREEZABLE; 3672699ce097STejun Heo 3673699ce097STejun Heo /* for @wq->saved_max_active */ 3674699ce097STejun Heo lockdep_assert_held(&workqueue_lock); 3675699ce097STejun Heo 3676699ce097STejun Heo /* fast exit for non-freezable wqs */ 3677699ce097STejun Heo if (!freezable && pwq->max_active == wq->saved_max_active) 3678699ce097STejun Heo return; 3679699ce097STejun Heo 3680699ce097STejun Heo spin_lock(&pwq->pool->lock); 3681699ce097STejun Heo 3682699ce097STejun Heo if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) { 3683699ce097STejun Heo pwq->max_active = wq->saved_max_active; 36840fbd95aaSTejun Heo 36850fbd95aaSTejun Heo while (!list_empty(&pwq->delayed_works) && 36860fbd95aaSTejun Heo pwq->nr_active < pwq->max_active) 36870fbd95aaSTejun Heo pwq_activate_first_delayed(pwq); 3688699ce097STejun Heo } else { 3689699ce097STejun Heo pwq->max_active = 0; 3690699ce097STejun Heo } 3691699ce097STejun Heo 3692699ce097STejun Heo spin_unlock(&pwq->pool->lock); 36930fbd95aaSTejun Heo } 36940fbd95aaSTejun Heo 3695d2c1d404STejun Heo static void init_and_link_pwq(struct pool_workqueue *pwq, 3696d2c1d404STejun Heo struct workqueue_struct *wq, 36979e8cd2f5STejun Heo struct worker_pool *pool, 36989e8cd2f5STejun Heo struct pool_workqueue **p_last_pwq) 3699d2c1d404STejun Heo { 3700d2c1d404STejun Heo BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 3701d2c1d404STejun Heo 3702d2c1d404STejun Heo pwq->pool = pool; 3703d2c1d404STejun Heo pwq->wq = wq; 3704d2c1d404STejun Heo pwq->flush_color = -1; 37058864b4e5STejun Heo pwq->refcnt = 1; 3706d2c1d404STejun Heo INIT_LIST_HEAD(&pwq->delayed_works); 3707d2c1d404STejun Heo INIT_LIST_HEAD(&pwq->mayday_node); 37088864b4e5STejun Heo INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); 3709d2c1d404STejun Heo 371075ccf595STejun Heo mutex_lock(&wq->flush_mutex); 371175ccf595STejun Heo spin_lock_irq(&workqueue_lock); 371275ccf595STejun Heo 3713983ca25eSTejun Heo /* 3714983ca25eSTejun Heo * Set the matching work_color. 
This is synchronized with 3715983ca25eSTejun Heo * flush_mutex to avoid confusing flush_workqueue(). 3716983ca25eSTejun Heo */ 37179e8cd2f5STejun Heo if (p_last_pwq) 37189e8cd2f5STejun Heo *p_last_pwq = first_pwq(wq); 371975ccf595STejun Heo pwq->work_color = wq->work_color; 3720983ca25eSTejun Heo 3721983ca25eSTejun Heo /* sync max_active to the current setting */ 3722983ca25eSTejun Heo pwq_adjust_max_active(pwq); 3723983ca25eSTejun Heo 3724983ca25eSTejun Heo /* link in @pwq */ 37259e8cd2f5STejun Heo list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 372675ccf595STejun Heo 372775ccf595STejun Heo spin_unlock_irq(&workqueue_lock); 372875ccf595STejun Heo mutex_unlock(&wq->flush_mutex); 3729d2c1d404STejun Heo } 3730d2c1d404STejun Heo 37319e8cd2f5STejun Heo /** 37329e8cd2f5STejun Heo * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 37339e8cd2f5STejun Heo * @wq: the target workqueue 37349e8cd2f5STejun Heo * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 37359e8cd2f5STejun Heo * 37369e8cd2f5STejun Heo * Apply @attrs to an unbound workqueue @wq. If @attrs doesn't match the 37379e8cd2f5STejun Heo * current attributes, a new pwq is created and made the first pwq which 37389e8cd2f5STejun Heo * will serve all new work items. Older pwqs are released as in-flight 37399e8cd2f5STejun Heo * work items finish. Note that a work item which repeatedly requeues 37409e8cd2f5STejun Heo * itself back-to-back will stay on its current pwq. 37419e8cd2f5STejun Heo * 37429e8cd2f5STejun Heo * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on 37439e8cd2f5STejun Heo * failure. 37449e8cd2f5STejun Heo */ 37459e8cd2f5STejun Heo int apply_workqueue_attrs(struct workqueue_struct *wq, 37469e8cd2f5STejun Heo const struct workqueue_attrs *attrs) 37479e8cd2f5STejun Heo { 37489e8cd2f5STejun Heo struct pool_workqueue *pwq, *last_pwq; 37499e8cd2f5STejun Heo struct worker_pool *pool; 37509e8cd2f5STejun Heo 37518719dceaSTejun Heo /* only unbound workqueues can change attributes */ 37529e8cd2f5STejun Heo if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 37539e8cd2f5STejun Heo return -EINVAL; 37549e8cd2f5STejun Heo 37558719dceaSTejun Heo /* creating multiple pwqs breaks ordering guarantee */ 37568719dceaSTejun Heo if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) 37578719dceaSTejun Heo return -EINVAL; 37588719dceaSTejun Heo 37599e8cd2f5STejun Heo pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL); 37609e8cd2f5STejun Heo if (!pwq) 37619e8cd2f5STejun Heo return -ENOMEM; 37629e8cd2f5STejun Heo 37639e8cd2f5STejun Heo pool = get_unbound_pool(attrs); 37649e8cd2f5STejun Heo if (!pool) { 37659e8cd2f5STejun Heo kmem_cache_free(pwq_cache, pwq); 37669e8cd2f5STejun Heo return -ENOMEM; 37679e8cd2f5STejun Heo } 37689e8cd2f5STejun Heo 37699e8cd2f5STejun Heo init_and_link_pwq(pwq, wq, pool, &last_pwq); 37709e8cd2f5STejun Heo if (last_pwq) { 37719e8cd2f5STejun Heo spin_lock_irq(&last_pwq->pool->lock); 37729e8cd2f5STejun Heo put_pwq(last_pwq); 37739e8cd2f5STejun Heo spin_unlock_irq(&last_pwq->pool->lock); 37749e8cd2f5STejun Heo } 37759e8cd2f5STejun Heo 37769e8cd2f5STejun Heo return 0; 37779e8cd2f5STejun Heo } 37789e8cd2f5STejun Heo 377930cdf249STejun Heo static int alloc_and_link_pwqs(struct workqueue_struct *wq) 37801da177e4SLinus Torvalds { 378149e3cf44STejun Heo bool highpri = wq->flags & WQ_HIGHPRI; 378230cdf249STejun Heo int cpu; 3783e1d8aa9fSFrederic Weisbecker 378430cdf249STejun Heo if (!(wq->flags & WQ_UNBOUND)) { 3785420c0ddbSTejun Heo wq->cpu_pwqs = alloc_percpu(struct 
pool_workqueue); 3786420c0ddbSTejun Heo if (!wq->cpu_pwqs) 378730cdf249STejun Heo return -ENOMEM; 378830cdf249STejun Heo 378930cdf249STejun Heo for_each_possible_cpu(cpu) { 37907fb98ea7STejun Heo struct pool_workqueue *pwq = 37917fb98ea7STejun Heo per_cpu_ptr(wq->cpu_pwqs, cpu); 37927a62c2c8STejun Heo struct worker_pool *cpu_pools = 3793f02ae73aSTejun Heo per_cpu(cpu_worker_pools, cpu); 379430cdf249STejun Heo 37959e8cd2f5STejun Heo init_and_link_pwq(pwq, wq, &cpu_pools[highpri], NULL); 379630cdf249STejun Heo } 379730cdf249STejun Heo return 0; 37989e8cd2f5STejun Heo } else { 37999e8cd2f5STejun Heo return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 38009e8cd2f5STejun Heo } 38010f900049STejun Heo } 38020f900049STejun Heo 3803f3421797STejun Heo static int wq_clamp_max_active(int max_active, unsigned int flags, 3804f3421797STejun Heo const char *name) 3805b71ab8c2STejun Heo { 3806f3421797STejun Heo int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 3807f3421797STejun Heo 3808f3421797STejun Heo if (max_active < 1 || max_active > lim) 3809044c782cSValentin Ilie pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 3810f3421797STejun Heo max_active, name, 1, lim); 3811b71ab8c2STejun Heo 3812f3421797STejun Heo return clamp_val(max_active, 1, lim); 3813b71ab8c2STejun Heo } 3814b71ab8c2STejun Heo 3815b196be89STejun Heo struct workqueue_struct *__alloc_workqueue_key(const char *fmt, 381697e37d7bSTejun Heo unsigned int flags, 38171e19ffc6STejun Heo int max_active, 3818eb13ba87SJohannes Berg struct lock_class_key *key, 3819b196be89STejun Heo const char *lock_name, ...) 38203af24433SOleg Nesterov { 3821b196be89STejun Heo va_list args, args1; 38223af24433SOleg Nesterov struct workqueue_struct *wq; 382349e3cf44STejun Heo struct pool_workqueue *pwq; 3824b196be89STejun Heo size_t namelen; 3825b196be89STejun Heo 3826b196be89STejun Heo /* determine namelen, allocate wq and format name */ 3827b196be89STejun Heo va_start(args, lock_name); 3828b196be89STejun Heo va_copy(args1, args); 3829b196be89STejun Heo namelen = vsnprintf(NULL, 0, fmt, args) + 1; 3830b196be89STejun Heo 3831b196be89STejun Heo wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL); 3832b196be89STejun Heo if (!wq) 3833d2c1d404STejun Heo return NULL; 3834b196be89STejun Heo 3835b196be89STejun Heo vsnprintf(wq->name, namelen, fmt, args1); 3836b196be89STejun Heo va_end(args); 3837b196be89STejun Heo va_end(args1); 38383af24433SOleg Nesterov 3839d320c038STejun Heo max_active = max_active ?: WQ_DFL_ACTIVE; 3840b196be89STejun Heo max_active = wq_clamp_max_active(max_active, flags, wq->name); 38413af24433SOleg Nesterov 3842b196be89STejun Heo /* init wq */ 384397e37d7bSTejun Heo wq->flags = flags; 3844a0a1a5fdSTejun Heo wq->saved_max_active = max_active; 384573f53c4aSTejun Heo mutex_init(&wq->flush_mutex); 3846112202d9STejun Heo atomic_set(&wq->nr_pwqs_to_flush, 0); 384730cdf249STejun Heo INIT_LIST_HEAD(&wq->pwqs); 384873f53c4aSTejun Heo INIT_LIST_HEAD(&wq->flusher_queue); 384973f53c4aSTejun Heo INIT_LIST_HEAD(&wq->flusher_overflow); 3850493a1724STejun Heo INIT_LIST_HEAD(&wq->maydays); 38513af24433SOleg Nesterov 3852eb13ba87SJohannes Berg lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 3853cce1a165SOleg Nesterov INIT_LIST_HEAD(&wq->list); 38543af24433SOleg Nesterov 385530cdf249STejun Heo if (alloc_and_link_pwqs(wq) < 0) 3856d2c1d404STejun Heo goto err_free_wq; 38571537663fSTejun Heo 3858493008a8STejun Heo /* 3859493008a8STejun Heo * Workqueues which may be used during memory 
reclaim should 3860493008a8STejun Heo * have a rescuer to guarantee forward progress. 3861493008a8STejun Heo */ 3862493008a8STejun Heo if (flags & WQ_MEM_RECLAIM) { 3863e22bee78STejun Heo struct worker *rescuer; 3864e22bee78STejun Heo 3865d2c1d404STejun Heo rescuer = alloc_worker(); 3866e22bee78STejun Heo if (!rescuer) 3867d2c1d404STejun Heo goto err_destroy; 3868e22bee78STejun Heo 3869111c225aSTejun Heo rescuer->rescue_wq = wq; 3870111c225aSTejun Heo rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", 3871b196be89STejun Heo wq->name); 3872d2c1d404STejun Heo if (IS_ERR(rescuer->task)) { 3873d2c1d404STejun Heo kfree(rescuer); 3874d2c1d404STejun Heo goto err_destroy; 3875d2c1d404STejun Heo } 3876e22bee78STejun Heo 3877d2c1d404STejun Heo wq->rescuer = rescuer; 3878e22bee78STejun Heo rescuer->task->flags |= PF_THREAD_BOUND; 3879e22bee78STejun Heo wake_up_process(rescuer->task); 38803af24433SOleg Nesterov } 38811537663fSTejun Heo 3882226223abSTejun Heo if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 3883226223abSTejun Heo goto err_destroy; 3884226223abSTejun Heo 38853af24433SOleg Nesterov /* 3886699ce097STejun Heo * workqueue_lock protects global freeze state and workqueues list. 3887699ce097STejun Heo * Grab it, adjust max_active and add the new workqueue to 3888699ce097STejun Heo * workqueues list. 38893af24433SOleg Nesterov */ 3890e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 3891a0a1a5fdSTejun Heo 389249e3cf44STejun Heo for_each_pwq(pwq, wq) 3893699ce097STejun Heo pwq_adjust_max_active(pwq); 3894a0a1a5fdSTejun Heo 38953af24433SOleg Nesterov list_add(&wq->list, &workqueues); 3896a0a1a5fdSTejun Heo 3897e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 38983af24433SOleg Nesterov 38993af24433SOleg Nesterov return wq; 3900d2c1d404STejun Heo 3901d2c1d404STejun Heo err_free_wq: 39024690c4abSTejun Heo kfree(wq); 3903d2c1d404STejun Heo return NULL; 3904d2c1d404STejun Heo err_destroy: 3905d2c1d404STejun Heo destroy_workqueue(wq); 39064690c4abSTejun Heo return NULL; 39071da177e4SLinus Torvalds } 3908d320c038STejun Heo EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 39091da177e4SLinus Torvalds 39103af24433SOleg Nesterov /** 39113af24433SOleg Nesterov * destroy_workqueue - safely terminate a workqueue 39123af24433SOleg Nesterov * @wq: target workqueue 39133af24433SOleg Nesterov * 39143af24433SOleg Nesterov * Safely destroy a workqueue. All work currently pending will be done first. 
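 *
 * Typical lifecycle sketch (the "my_wq" name and @my_work item are
 * illustrative placeholders):
 *
 *	wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &my_work);
 *	...
 *	destroy_workqueue(wq);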
39153af24433SOleg Nesterov */ 39163af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq) 39173af24433SOleg Nesterov { 391849e3cf44STejun Heo struct pool_workqueue *pwq; 39193af24433SOleg Nesterov 39209c5a2ba7STejun Heo /* drain it before proceeding with destruction */ 39219c5a2ba7STejun Heo drain_workqueue(wq); 3922c8efcc25STejun Heo 392376af4d93STejun Heo spin_lock_irq(&workqueue_lock); 392476af4d93STejun Heo 39256183c009STejun Heo /* sanity checks */ 392649e3cf44STejun Heo for_each_pwq(pwq, wq) { 39276183c009STejun Heo int i; 39286183c009STejun Heo 392976af4d93STejun Heo for (i = 0; i < WORK_NR_COLORS; i++) { 393076af4d93STejun Heo if (WARN_ON(pwq->nr_in_flight[i])) { 393176af4d93STejun Heo spin_unlock_irq(&workqueue_lock); 39326183c009STejun Heo return; 393376af4d93STejun Heo } 393476af4d93STejun Heo } 393576af4d93STejun Heo 39368864b4e5STejun Heo if (WARN_ON(pwq->refcnt > 1) || 39378864b4e5STejun Heo WARN_ON(pwq->nr_active) || 393876af4d93STejun Heo WARN_ON(!list_empty(&pwq->delayed_works))) { 393976af4d93STejun Heo spin_unlock_irq(&workqueue_lock); 39406183c009STejun Heo return; 39416183c009STejun Heo } 394276af4d93STejun Heo } 39436183c009STejun Heo 3944a0a1a5fdSTejun Heo /* 3945a0a1a5fdSTejun Heo * wq list is used to freeze wq, remove from list after 3946a0a1a5fdSTejun Heo * flushing is complete in case freeze races us. 3947a0a1a5fdSTejun Heo */ 3948d2c1d404STejun Heo list_del_init(&wq->list); 394976af4d93STejun Heo 3950e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 39513af24433SOleg Nesterov 3952226223abSTejun Heo workqueue_sysfs_unregister(wq); 3953226223abSTejun Heo 3954493008a8STejun Heo if (wq->rescuer) { 3955e22bee78STejun Heo kthread_stop(wq->rescuer->task); 39568d9df9f0SXiaotian Feng kfree(wq->rescuer); 3957493008a8STejun Heo wq->rescuer = NULL; 3958e22bee78STejun Heo } 3959e22bee78STejun Heo 39608864b4e5STejun Heo if (!(wq->flags & WQ_UNBOUND)) { 396129c91e99STejun Heo /* 39628864b4e5STejun Heo * The base ref is never dropped on per-cpu pwqs. Directly 39638864b4e5STejun Heo * free the pwqs and wq. 396429c91e99STejun Heo */ 39658864b4e5STejun Heo free_percpu(wq->cpu_pwqs); 39668864b4e5STejun Heo kfree(wq); 39678864b4e5STejun Heo } else { 39688864b4e5STejun Heo /* 39698864b4e5STejun Heo * We're the sole accessor of @wq at this point. Directly 39708864b4e5STejun Heo * access the first pwq and put the base ref. As both pwqs 39718864b4e5STejun Heo * and pools are sched-RCU protected, the lock operations 39728864b4e5STejun Heo * are safe. @wq will be freed when the last pwq is 39738864b4e5STejun Heo * released. 39748864b4e5STejun Heo */ 397529c91e99STejun Heo pwq = list_first_entry(&wq->pwqs, struct pool_workqueue, 397629c91e99STejun Heo pwqs_node); 39778864b4e5STejun Heo spin_lock_irq(&pwq->pool->lock); 39788864b4e5STejun Heo put_pwq(pwq); 39798864b4e5STejun Heo spin_unlock_irq(&pwq->pool->lock); 398029c91e99STejun Heo } 39813af24433SOleg Nesterov } 39823af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue); 39833af24433SOleg Nesterov 3984dcd989cbSTejun Heo /** 3985dcd989cbSTejun Heo * workqueue_set_max_active - adjust max_active of a workqueue 3986dcd989cbSTejun Heo * @wq: target workqueue 3987dcd989cbSTejun Heo * @max_active: new max_active value. 3988dcd989cbSTejun Heo * 3989dcd989cbSTejun Heo * Set max_active of @wq to @max_active. 3990dcd989cbSTejun Heo * 3991dcd989cbSTejun Heo * CONTEXT: 3992dcd989cbSTejun Heo * Don't call from IRQ context. 
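 *
 * For example (sketch), the following limits @wq to one in-flight work
 * item per pool_workqueue. Note that this throttles concurrency but does
 * not by itself provide the ordering guarantee of an __WQ_ORDERED
 * workqueue:
 *
 *	workqueue_set_max_active(wq, 1);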
3993dcd989cbSTejun Heo */ 3994dcd989cbSTejun Heo void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 3995dcd989cbSTejun Heo { 399649e3cf44STejun Heo struct pool_workqueue *pwq; 3997dcd989cbSTejun Heo 39988719dceaSTejun Heo /* disallow meddling with max_active for ordered workqueues */ 39998719dceaSTejun Heo if (WARN_ON(wq->flags & __WQ_ORDERED)) 40008719dceaSTejun Heo return; 40018719dceaSTejun Heo 4002f3421797STejun Heo max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 4003dcd989cbSTejun Heo 4004e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 4005dcd989cbSTejun Heo 4006dcd989cbSTejun Heo wq->saved_max_active = max_active; 4007dcd989cbSTejun Heo 4008699ce097STejun Heo for_each_pwq(pwq, wq) 4009699ce097STejun Heo pwq_adjust_max_active(pwq); 4010dcd989cbSTejun Heo 4011e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 4012dcd989cbSTejun Heo } 4013dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4014dcd989cbSTejun Heo 4015dcd989cbSTejun Heo /** 4016e6267616STejun Heo * current_is_workqueue_rescuer - is %current workqueue rescuer? 4017e6267616STejun Heo * 4018e6267616STejun Heo * Determine whether %current is a workqueue rescuer. Can be used from 4019e6267616STejun Heo * work functions to determine whether it's being run off the rescuer task. 4020e6267616STejun Heo */ 4021e6267616STejun Heo bool current_is_workqueue_rescuer(void) 4022e6267616STejun Heo { 4023e6267616STejun Heo struct worker *worker = current_wq_worker(); 4024e6267616STejun Heo 4025e6267616STejun Heo return worker && worker == worker->current_pwq->wq->rescuer; 4026e6267616STejun Heo } 4027e6267616STejun Heo 4028e6267616STejun Heo /** 4029dcd989cbSTejun Heo * workqueue_congested - test whether a workqueue is congested 4030dcd989cbSTejun Heo * @cpu: CPU in question 4031dcd989cbSTejun Heo * @wq: target workqueue 4032dcd989cbSTejun Heo * 4033dcd989cbSTejun Heo * Test whether @wq's cpu workqueue for @cpu is congested. There is 4034dcd989cbSTejun Heo * no synchronization around this function and the test result is 4035dcd989cbSTejun Heo * unreliable and only useful as advisory hints or for debugging. 4036dcd989cbSTejun Heo * 4037dcd989cbSTejun Heo * RETURNS: 4038dcd989cbSTejun Heo * %true if congested, %false otherwise. 4039dcd989cbSTejun Heo */ 4040d84ff051STejun Heo bool workqueue_congested(int cpu, struct workqueue_struct *wq) 4041dcd989cbSTejun Heo { 40427fb98ea7STejun Heo struct pool_workqueue *pwq; 404376af4d93STejun Heo bool ret; 404476af4d93STejun Heo 404576af4d93STejun Heo preempt_disable(); 40467fb98ea7STejun Heo 40477fb98ea7STejun Heo if (!(wq->flags & WQ_UNBOUND)) 40487fb98ea7STejun Heo pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 40497fb98ea7STejun Heo else 40507fb98ea7STejun Heo pwq = first_pwq(wq); 4051dcd989cbSTejun Heo 405276af4d93STejun Heo ret = !list_empty(&pwq->delayed_works); 405376af4d93STejun Heo preempt_enable(); 405476af4d93STejun Heo 405576af4d93STejun Heo return ret; 4056dcd989cbSTejun Heo } 4057dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_congested); 4058dcd989cbSTejun Heo 4059dcd989cbSTejun Heo /** 4060dcd989cbSTejun Heo * work_busy - test whether a work is currently pending or running 4061dcd989cbSTejun Heo * @work: the work to be tested 4062dcd989cbSTejun Heo * 4063dcd989cbSTejun Heo * Test whether @work is currently pending or running. There is no 4064dcd989cbSTejun Heo * synchronization around this function and the test result is 4065dcd989cbSTejun Heo * unreliable and only useful as advisory hints or for debugging. 
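 *
 * An advisory sketch (@my_work is an illustrative placeholder):
 *
 *	if (work_busy(&my_work) & WORK_BUSY_RUNNING)
 *		pr_debug("my_work is still running\n");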
4066dcd989cbSTejun Heo  *
4067dcd989cbSTejun Heo  * RETURNS:
4068dcd989cbSTejun Heo  * OR'd bitmask of WORK_BUSY_* bits.
4069dcd989cbSTejun Heo  */
4070dcd989cbSTejun Heo unsigned int work_busy(struct work_struct *work)
4071dcd989cbSTejun Heo {
4072fa1b54e6STejun Heo 	struct worker_pool *pool;
4073dcd989cbSTejun Heo 	unsigned long flags;
4074dcd989cbSTejun Heo 	unsigned int ret = 0;
4075dcd989cbSTejun Heo 
4076dcd989cbSTejun Heo 	if (work_pending(work))
4077dcd989cbSTejun Heo 		ret |= WORK_BUSY_PENDING;
4078038366c5SLai Jiangshan 
4079fa1b54e6STejun Heo 	local_irq_save(flags);
4080fa1b54e6STejun Heo 	pool = get_work_pool(work);
4081038366c5SLai Jiangshan 	if (pool) {
4082fa1b54e6STejun Heo 		spin_lock(&pool->lock);
4083c9e7cf27STejun Heo 		if (find_worker_executing_work(pool, work))
4084dcd989cbSTejun Heo 			ret |= WORK_BUSY_RUNNING;
4085fa1b54e6STejun Heo 		spin_unlock(&pool->lock);
4086038366c5SLai Jiangshan 	}
4087fa1b54e6STejun Heo 	local_irq_restore(flags);
4088dcd989cbSTejun Heo 
4089dcd989cbSTejun Heo 	return ret;
4090dcd989cbSTejun Heo }
4091dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_busy);
4092dcd989cbSTejun Heo 
4093db7bccf4STejun Heo /*
4094db7bccf4STejun Heo  * CPU hotplug.
4095db7bccf4STejun Heo  *
4096e22bee78STejun Heo  * There are two challenges in supporting CPU hotplug. Firstly, there
4097112202d9STejun Heo  * are a lot of assumptions on strong associations among work, pwq and
4098706026c2STejun Heo  * pool which make migrating pending and scheduled works very
4099e22bee78STejun Heo  * difficult to implement without impacting hot paths. Secondly,
410094cf58bbSTejun Heo  * worker pools serve a mix of short, long and very long running works,
4101e22bee78STejun Heo  * making blocked draining impractical.
4102e22bee78STejun Heo  *
410324647570STejun Heo  * This is solved by allowing the pools to be disassociated from the CPU,
4104628c78e7STejun Heo  * running as unbound ones, and allowing them to be reattached later if
4105628c78e7STejun Heo  * the cpu comes back online.
4106db7bccf4STejun Heo  */
4107db7bccf4STejun Heo 
4108706026c2STejun Heo static void wq_unbind_fn(struct work_struct *work)
4109db7bccf4STejun Heo {
411038db41d9STejun Heo 	int cpu = smp_processor_id();
41114ce62e9eSTejun Heo 	struct worker_pool *pool;
4112db7bccf4STejun Heo 	struct worker *worker;
4113db7bccf4STejun Heo 	int i;
4114db7bccf4STejun Heo 
4115f02ae73aSTejun Heo 	for_each_cpu_worker_pool(pool, cpu) {
41166183c009STejun Heo 		WARN_ON_ONCE(cpu != smp_processor_id());
4117db7bccf4STejun Heo 
4118bc3a1afcSTejun Heo 		mutex_lock(&pool->manager_mutex);
411994cf58bbSTejun Heo 		spin_lock_irq(&pool->lock);
4120e22bee78STejun Heo 
4121f2d5a0eeSTejun Heo 		/*
4122bc3a1afcSTejun Heo 		 * We've blocked all manager operations. Make all workers
412394cf58bbSTejun Heo 		 * unbound and set DISASSOCIATED. Before this, all workers
412494cf58bbSTejun Heo 		 * except for the ones which are still executing works from
412594cf58bbSTejun Heo 		 * before the last CPU down must be on the cpu. After
412694cf58bbSTejun Heo 		 * this, they may become diasporas.
4127f2d5a0eeSTejun Heo 		 */
41284ce62e9eSTejun Heo 		list_for_each_entry(worker, &pool->idle_list, entry)
4129403c821dSTejun Heo 			worker->flags |= WORKER_UNBOUND;
4130db7bccf4STejun Heo 
4131b67bfe0dSSasha Levin 		for_each_busy_worker(worker, i, pool)
4132403c821dSTejun Heo 			worker->flags |= WORKER_UNBOUND;
4133db7bccf4STejun Heo 
413424647570STejun Heo 		pool->flags |= POOL_DISASSOCIATED;
4135f2d5a0eeSTejun Heo 
413694cf58bbSTejun Heo 		spin_unlock_irq(&pool->lock);
4137bc3a1afcSTejun Heo 		mutex_unlock(&pool->manager_mutex);
413894cf58bbSTejun Heo 	}
4139e22bee78STejun Heo 
4140e22bee78STejun Heo 	/*
4141628c78e7STejun Heo 	 * Call schedule() so that we cross rq->lock and thus can guarantee
4142628c78e7STejun Heo 	 * sched callbacks see the %WORKER_UNBOUND flag. This is necessary
4143628c78e7STejun Heo 	 * as scheduler callbacks may be invoked from other cpus.
4144628c78e7STejun Heo 	 */
4145628c78e7STejun Heo 	schedule();
4146628c78e7STejun Heo 
4147628c78e7STejun Heo 	/*
4148628c78e7STejun Heo 	 * Sched callbacks are disabled now. Zap nr_running. After this,
4149628c78e7STejun Heo 	 * nr_running stays zero and need_more_worker() and keep_working()
415038db41d9STejun Heo 	 * are always true as long as the worklist is not empty. Pools on
415138db41d9STejun Heo 	 * @cpu now behave as unbound (in terms of concurrency management)
415238db41d9STejun Heo 	 * pools which are served by workers tied to the CPU.
4153628c78e7STejun Heo 	 *
4154628c78e7STejun Heo 	 * On return from this function, the current worker would trigger
4155628c78e7STejun Heo 	 * unbound chain execution of pending work items if other workers
4156628c78e7STejun Heo 	 * didn't already.
4157e22bee78STejun Heo 	 */
4158f02ae73aSTejun Heo 	for_each_cpu_worker_pool(pool, cpu)
4159e19e397aSTejun Heo 		atomic_set(&pool->nr_running, 0);
4160db7bccf4STejun Heo }
4161db7bccf4STejun Heo 
41628db25e78STejun Heo /*
41638db25e78STejun Heo  * Workqueues should be brought up before normal priority CPU notifiers.
41648db25e78STejun Heo  * This will be registered as a high priority CPU notifier.
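 * (It is registered from init_workqueues() below via
 *  cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP).)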
41658db25e78STejun Heo */ 41669fdf9b73SLai Jiangshan static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, 41671da177e4SLinus Torvalds unsigned long action, 41681da177e4SLinus Torvalds void *hcpu) 41691da177e4SLinus Torvalds { 4170d84ff051STejun Heo int cpu = (unsigned long)hcpu; 41714ce62e9eSTejun Heo struct worker_pool *pool; 41721da177e4SLinus Torvalds 41738db25e78STejun Heo switch (action & ~CPU_TASKS_FROZEN) { 41743af24433SOleg Nesterov case CPU_UP_PREPARE: 4175f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 41763ce63377STejun Heo if (pool->nr_workers) 41773ce63377STejun Heo continue; 4178ebf44d16STejun Heo if (create_and_start_worker(pool) < 0) 41793ce63377STejun Heo return NOTIFY_BAD; 41803af24433SOleg Nesterov } 41811da177e4SLinus Torvalds break; 41821da177e4SLinus Torvalds 418365758202STejun Heo case CPU_DOWN_FAILED: 418465758202STejun Heo case CPU_ONLINE: 4185f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 4186bc3a1afcSTejun Heo mutex_lock(&pool->manager_mutex); 418794cf58bbSTejun Heo spin_lock_irq(&pool->lock); 418894cf58bbSTejun Heo 418924647570STejun Heo pool->flags &= ~POOL_DISASSOCIATED; 419094cf58bbSTejun Heo rebind_workers(pool); 419194cf58bbSTejun Heo 419294cf58bbSTejun Heo spin_unlock_irq(&pool->lock); 4193bc3a1afcSTejun Heo mutex_unlock(&pool->manager_mutex); 419494cf58bbSTejun Heo } 41958db25e78STejun Heo break; 419665758202STejun Heo } 419765758202STejun Heo return NOTIFY_OK; 419865758202STejun Heo } 419965758202STejun Heo 420065758202STejun Heo /* 420165758202STejun Heo * Workqueues should be brought down after normal priority CPU notifiers. 420265758202STejun Heo * This will be registered as low priority CPU notifier. 420365758202STejun Heo */ 42049fdf9b73SLai Jiangshan static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, 420565758202STejun Heo unsigned long action, 420665758202STejun Heo void *hcpu) 420765758202STejun Heo { 4208d84ff051STejun Heo int cpu = (unsigned long)hcpu; 42098db25e78STejun Heo struct work_struct unbind_work; 42108db25e78STejun Heo 421165758202STejun Heo switch (action & ~CPU_TASKS_FROZEN) { 421265758202STejun Heo case CPU_DOWN_PREPARE: 42138db25e78STejun Heo /* unbinding should happen on the local CPU */ 4214706026c2STejun Heo INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 42157635d2fdSJoonsoo Kim queue_work_on(cpu, system_highpri_wq, &unbind_work); 42168db25e78STejun Heo flush_work(&unbind_work); 42178db25e78STejun Heo break; 421865758202STejun Heo } 421965758202STejun Heo return NOTIFY_OK; 422065758202STejun Heo } 422165758202STejun Heo 42222d3854a3SRusty Russell #ifdef CONFIG_SMP 42238ccad40dSRusty Russell 42242d3854a3SRusty Russell struct work_for_cpu { 4225ed48ece2STejun Heo struct work_struct work; 42262d3854a3SRusty Russell long (*fn)(void *); 42272d3854a3SRusty Russell void *arg; 42282d3854a3SRusty Russell long ret; 42292d3854a3SRusty Russell }; 42302d3854a3SRusty Russell 4231ed48ece2STejun Heo static void work_for_cpu_fn(struct work_struct *work) 42322d3854a3SRusty Russell { 4233ed48ece2STejun Heo struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 4234ed48ece2STejun Heo 42352d3854a3SRusty Russell wfc->ret = wfc->fn(wfc->arg); 42362d3854a3SRusty Russell } 42372d3854a3SRusty Russell 42382d3854a3SRusty Russell /** 42392d3854a3SRusty Russell * work_on_cpu - run a function in user context on a particular cpu 42402d3854a3SRusty Russell * @cpu: the cpu to run on 42412d3854a3SRusty Russell * @fn: the function to run 42422d3854a3SRusty Russell * @arg: the 
function arg 42432d3854a3SRusty Russell * 424431ad9081SRusty Russell * This will return the value @fn returns. 424531ad9081SRusty Russell * It is up to the caller to ensure that the cpu doesn't go offline. 42466b44003eSAndrew Morton * The caller must not hold any locks which would prevent @fn from completing. 42472d3854a3SRusty Russell */ 4248d84ff051STejun Heo long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 42492d3854a3SRusty Russell { 4250ed48ece2STejun Heo struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 42512d3854a3SRusty Russell 4252ed48ece2STejun Heo INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 4253ed48ece2STejun Heo schedule_work_on(cpu, &wfc.work); 4254ed48ece2STejun Heo flush_work(&wfc.work); 42552d3854a3SRusty Russell return wfc.ret; 42562d3854a3SRusty Russell } 42572d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu); 42582d3854a3SRusty Russell #endif /* CONFIG_SMP */ 42592d3854a3SRusty Russell 4260a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER 4261e7577c50SRusty Russell 4262a0a1a5fdSTejun Heo /** 4263a0a1a5fdSTejun Heo * freeze_workqueues_begin - begin freezing workqueues 4264a0a1a5fdSTejun Heo * 426558a69cb4STejun Heo * Start freezing workqueues. After this function returns, all freezable 4266c5aa87bbSTejun Heo * workqueues will queue new works to their delayed_works list instead of 4267706026c2STejun Heo * pool->worklist. 4268a0a1a5fdSTejun Heo * 4269a0a1a5fdSTejun Heo * CONTEXT: 4270d565ed63STejun Heo * Grabs and releases workqueue_lock and pool->lock's. 4271a0a1a5fdSTejun Heo */ 4272a0a1a5fdSTejun Heo void freeze_workqueues_begin(void) 4273a0a1a5fdSTejun Heo { 427417116969STejun Heo struct worker_pool *pool; 427524b8a847STejun Heo struct workqueue_struct *wq; 427624b8a847STejun Heo struct pool_workqueue *pwq; 4277611c92a0STejun Heo int pi; 4278a0a1a5fdSTejun Heo 4279e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 4280a0a1a5fdSTejun Heo 42816183c009STejun Heo WARN_ON_ONCE(workqueue_freezing); 4282a0a1a5fdSTejun Heo workqueue_freezing = true; 4283a0a1a5fdSTejun Heo 428424b8a847STejun Heo /* set FREEZING */ 4285611c92a0STejun Heo for_each_pool(pool, pi) { 4286e98d5b16STejun Heo spin_lock(&pool->lock); 428735b6bb63STejun Heo WARN_ON_ONCE(pool->flags & POOL_FREEZING); 428835b6bb63STejun Heo pool->flags |= POOL_FREEZING; 428924b8a847STejun Heo spin_unlock(&pool->lock); 42901da177e4SLinus Torvalds } 42918b03ae3cSTejun Heo 429224b8a847STejun Heo /* suppress further executions by setting max_active to zero */ 429324b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 4294699ce097STejun Heo for_each_pwq(pwq, wq) 4295699ce097STejun Heo pwq_adjust_max_active(pwq); 4296a1056305STejun Heo } 4297a0a1a5fdSTejun Heo 4298e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 4299a0a1a5fdSTejun Heo } 4300a0a1a5fdSTejun Heo 4301a0a1a5fdSTejun Heo /** 430258a69cb4STejun Heo * freeze_workqueues_busy - are freezable workqueues still busy? 4303a0a1a5fdSTejun Heo * 4304a0a1a5fdSTejun Heo * Check whether freezing is complete. This function must be called 4305a0a1a5fdSTejun Heo * between freeze_workqueues_begin() and thaw_workqueues(). 4306a0a1a5fdSTejun Heo * 4307a0a1a5fdSTejun Heo * CONTEXT: 4308a0a1a5fdSTejun Heo * Grabs and releases workqueue_lock. 4309a0a1a5fdSTejun Heo * 4310a0a1a5fdSTejun Heo * RETURNS: 431158a69cb4STejun Heo * %true if some freezable workqueues are still busy. %false if freezing 431258a69cb4STejun Heo * is complete. 
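 *
 * A sketch of how the freezer is expected to drive these functions (the
 * polling interval is illustrative):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...
 *	thaw_workqueues();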
4313a0a1a5fdSTejun Heo */ 4314a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void) 4315a0a1a5fdSTejun Heo { 4316a0a1a5fdSTejun Heo bool busy = false; 431724b8a847STejun Heo struct workqueue_struct *wq; 431824b8a847STejun Heo struct pool_workqueue *pwq; 4319a0a1a5fdSTejun Heo 4320e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 4321a0a1a5fdSTejun Heo 43226183c009STejun Heo WARN_ON_ONCE(!workqueue_freezing); 4323a0a1a5fdSTejun Heo 432424b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 432524b8a847STejun Heo if (!(wq->flags & WQ_FREEZABLE)) 432624b8a847STejun Heo continue; 4327a0a1a5fdSTejun Heo /* 4328a0a1a5fdSTejun Heo * nr_active is monotonically decreasing. It's safe 4329a0a1a5fdSTejun Heo * to peek without lock. 4330a0a1a5fdSTejun Heo */ 433124b8a847STejun Heo for_each_pwq(pwq, wq) { 43326183c009STejun Heo WARN_ON_ONCE(pwq->nr_active < 0); 4333112202d9STejun Heo if (pwq->nr_active) { 4334a0a1a5fdSTejun Heo busy = true; 4335a0a1a5fdSTejun Heo goto out_unlock; 4336a0a1a5fdSTejun Heo } 4337a0a1a5fdSTejun Heo } 4338a0a1a5fdSTejun Heo } 4339a0a1a5fdSTejun Heo out_unlock: 4340e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 4341a0a1a5fdSTejun Heo return busy; 4342a0a1a5fdSTejun Heo } 4343a0a1a5fdSTejun Heo 4344a0a1a5fdSTejun Heo /** 4345a0a1a5fdSTejun Heo * thaw_workqueues - thaw workqueues 4346a0a1a5fdSTejun Heo * 4347a0a1a5fdSTejun Heo * Thaw workqueues. Normal queueing is restored and all collected 4348706026c2STejun Heo * frozen works are transferred to their respective pool worklists. 4349a0a1a5fdSTejun Heo * 4350a0a1a5fdSTejun Heo * CONTEXT: 4351d565ed63STejun Heo * Grabs and releases workqueue_lock and pool->lock's. 4352a0a1a5fdSTejun Heo */ 4353a0a1a5fdSTejun Heo void thaw_workqueues(void) 4354a0a1a5fdSTejun Heo { 435524b8a847STejun Heo struct workqueue_struct *wq; 435624b8a847STejun Heo struct pool_workqueue *pwq; 435724b8a847STejun Heo struct worker_pool *pool; 4358611c92a0STejun Heo int pi; 4359a0a1a5fdSTejun Heo 4360e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 4361a0a1a5fdSTejun Heo 4362a0a1a5fdSTejun Heo if (!workqueue_freezing) 4363a0a1a5fdSTejun Heo goto out_unlock; 4364a0a1a5fdSTejun Heo 436524b8a847STejun Heo /* clear FREEZING */ 4366611c92a0STejun Heo for_each_pool(pool, pi) { 4367e98d5b16STejun Heo spin_lock(&pool->lock); 436835b6bb63STejun Heo WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); 436935b6bb63STejun Heo pool->flags &= ~POOL_FREEZING; 4370e98d5b16STejun Heo spin_unlock(&pool->lock); 4371d565ed63STejun Heo } 437224b8a847STejun Heo 437324b8a847STejun Heo /* restore max_active and repopulate worklist */ 437424b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 4375699ce097STejun Heo for_each_pwq(pwq, wq) 4376699ce097STejun Heo pwq_adjust_max_active(pwq); 437724b8a847STejun Heo } 437824b8a847STejun Heo 437924b8a847STejun Heo /* kick workers */ 4380611c92a0STejun Heo for_each_pool(pool, pi) { 438124b8a847STejun Heo spin_lock(&pool->lock); 438224b8a847STejun Heo wake_up_worker(pool); 438324b8a847STejun Heo spin_unlock(&pool->lock); 4384a0a1a5fdSTejun Heo } 4385a0a1a5fdSTejun Heo 4386a0a1a5fdSTejun Heo workqueue_freezing = false; 4387a0a1a5fdSTejun Heo out_unlock: 4388e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 4389a0a1a5fdSTejun Heo } 4390a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */ 4391a0a1a5fdSTejun Heo 43926ee0578bSSuresh Siddha static int __init init_workqueues(void) 43931da177e4SLinus Torvalds { 43947a4e344cSTejun Heo int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 43957a4e344cSTejun Heo int i, cpu; 
4396c34056a3STejun Heo 43977c3eed5cSTejun Heo /* make sure we have enough bits for OFFQ pool ID */ 43987c3eed5cSTejun Heo BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) < 43996be19588SLai Jiangshan WORK_CPU_END * NR_STD_WORKER_POOLS); 4400b5490077STejun Heo 4401e904e6c2STejun Heo WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 4402e904e6c2STejun Heo 4403e904e6c2STejun Heo pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 4404e904e6c2STejun Heo 440565758202STejun Heo cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); 4406a5b4e57dSLai Jiangshan hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 44078b03ae3cSTejun Heo 4408706026c2STejun Heo /* initialize CPU pools */ 440929c91e99STejun Heo for_each_possible_cpu(cpu) { 44104ce62e9eSTejun Heo struct worker_pool *pool; 44118b03ae3cSTejun Heo 44127a4e344cSTejun Heo i = 0; 4413f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 44147a4e344cSTejun Heo BUG_ON(init_worker_pool(pool)); 4415ec22ca5eSTejun Heo pool->cpu = cpu; 44167a4e344cSTejun Heo cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 44177a4e344cSTejun Heo pool->attrs->nice = std_nice[i++]; 44187a4e344cSTejun Heo 44199daf9e67STejun Heo /* alloc pool ID */ 44209daf9e67STejun Heo BUG_ON(worker_pool_assign_id(pool)); 44214ce62e9eSTejun Heo } 44228b03ae3cSTejun Heo } 44238b03ae3cSTejun Heo 4424e22bee78STejun Heo /* create the initial worker */ 442529c91e99STejun Heo for_each_online_cpu(cpu) { 44264ce62e9eSTejun Heo struct worker_pool *pool; 4427e22bee78STejun Heo 4428f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 442924647570STejun Heo pool->flags &= ~POOL_DISASSOCIATED; 4430ebf44d16STejun Heo BUG_ON(create_and_start_worker(pool) < 0); 4431e22bee78STejun Heo } 44324ce62e9eSTejun Heo } 4433e22bee78STejun Heo 443429c91e99STejun Heo /* create default unbound wq attrs */ 443529c91e99STejun Heo for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 443629c91e99STejun Heo struct workqueue_attrs *attrs; 443729c91e99STejun Heo 443829c91e99STejun Heo BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 443929c91e99STejun Heo 444029c91e99STejun Heo attrs->nice = std_nice[i]; 444129c91e99STejun Heo cpumask_setall(attrs->cpumask); 444229c91e99STejun Heo 444329c91e99STejun Heo unbound_std_wq_attrs[i] = attrs; 444429c91e99STejun Heo } 444529c91e99STejun Heo 4446d320c038STejun Heo system_wq = alloc_workqueue("events", 0, 0); 44471aabe902SJoonsoo Kim system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 4448d320c038STejun Heo system_long_wq = alloc_workqueue("events_long", 0, 0); 4449f3421797STejun Heo system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 4450f3421797STejun Heo WQ_UNBOUND_MAX_ACTIVE); 445124d51addSTejun Heo system_freezable_wq = alloc_workqueue("events_freezable", 445224d51addSTejun Heo WQ_FREEZABLE, 0); 44531aabe902SJoonsoo Kim BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 4454ae930e0fSTejun Heo !system_unbound_wq || !system_freezable_wq); 44556ee0578bSSuresh Siddha return 0; 44561da177e4SLinus Torvalds } 44576ee0578bSSuresh Siddha early_initcall(init_workqueues); 4458
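
/*
 * Example: a minimal client of the public API above (illustrative sketch
 * only; the "demo" names are placeholders, not part of this file):
 *
 *	static void demo_fn(struct work_struct *work)
 *	{
 *		pr_info("demo work ran\n");
 *	}
 *
 *	static DECLARE_WORK(demo_work, demo_fn);
 *
 *	static int __init demo_init(void)
 *	{
 *		struct workqueue_struct *demo_wq;
 *
 *		demo_wq = alloc_workqueue("demo", WQ_UNBOUND, 0);
 *		if (!demo_wq)
 *			return -ENOMEM;
 *		queue_work(demo_wq, &demo_work);
 *		flush_work(&demo_work);
 *		destroy_workqueue(demo_wq);
 *		return 0;
 *	}
 */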