11da177e4SLinus Torvalds /* 2c54fce6eSTejun Heo * kernel/workqueue.c - generic async execution with shared worker pool 31da177e4SLinus Torvalds * 4c54fce6eSTejun Heo * Copyright (C) 2002 Ingo Molnar 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * Derived from the taskqueue/keventd code by: 71da177e4SLinus Torvalds * David Woodhouse <dwmw2@infradead.org> 8e1f8e874SFrancois Cami * Andrew Morton 91da177e4SLinus Torvalds * Kai Petzke <wpp@marie.physik.tu-berlin.de> 101da177e4SLinus Torvalds * Theodore Ts'o <tytso@mit.edu> 1189ada679SChristoph Lameter * 12cde53535SChristoph Lameter * Made to use alloc_percpu by Christoph Lameter. 13c54fce6eSTejun Heo * 14c54fce6eSTejun Heo * Copyright (C) 2010 SUSE Linux Products GmbH 15c54fce6eSTejun Heo * Copyright (C) 2010 Tejun Heo <tj@kernel.org> 16c54fce6eSTejun Heo * 17c54fce6eSTejun Heo * This is the generic async execution mechanism. Work items are 18c54fce6eSTejun Heo * executed in process context. The worker pool is shared and 19c54fce6eSTejun Heo * automatically managed. There is one worker pool for each CPU and 20c54fce6eSTejun Heo * one extra for works which are better served by workers which are 21c54fce6eSTejun Heo * not bound to any specific CPU. 22c54fce6eSTejun Heo * 23c54fce6eSTejun Heo * Please read Documentation/workqueue.txt for details. 241da177e4SLinus Torvalds */ 251da177e4SLinus Torvalds 269984de1aSPaul Gortmaker #include <linux/export.h> 271da177e4SLinus Torvalds #include <linux/kernel.h> 281da177e4SLinus Torvalds #include <linux/sched.h> 291da177e4SLinus Torvalds #include <linux/init.h> 301da177e4SLinus Torvalds #include <linux/signal.h> 311da177e4SLinus Torvalds #include <linux/completion.h> 321da177e4SLinus Torvalds #include <linux/workqueue.h> 331da177e4SLinus Torvalds #include <linux/slab.h> 341da177e4SLinus Torvalds #include <linux/cpu.h> 351da177e4SLinus Torvalds #include <linux/notifier.h> 361da177e4SLinus Torvalds #include <linux/kthread.h> 371fa44ecaSJames Bottomley #include <linux/hardirq.h> 3846934023SChristoph Lameter #include <linux/mempolicy.h> 39341a5958SRafael J. Wysocki #include <linux/freezer.h> 40d5abe669SPeter Zijlstra #include <linux/kallsyms.h> 41d5abe669SPeter Zijlstra #include <linux/debug_locks.h> 424e6045f1SJohannes Berg #include <linux/lockdep.h> 43c34056a3STejun Heo #include <linux/idr.h> 4429c91e99STejun Heo #include <linux/jhash.h> 4542f8570fSSasha Levin #include <linux/hashtable.h> 4676af4d93STejun Heo #include <linux/rculist.h> 47e22bee78STejun Heo 48ea138446STejun Heo #include "workqueue_internal.h" 491da177e4SLinus Torvalds 50c8e55f36STejun Heo enum { 51bc2ae0f5STejun Heo /* 5224647570STejun Heo * worker_pool flags 53bc2ae0f5STejun Heo * 5424647570STejun Heo * A bound pool is either associated or disassociated with its CPU. 55bc2ae0f5STejun Heo * While associated (!DISASSOCIATED), all workers are bound to the 56bc2ae0f5STejun Heo * CPU and none has %WORKER_UNBOUND set and concurrency management 57bc2ae0f5STejun Heo * is in effect. 58bc2ae0f5STejun Heo * 59bc2ae0f5STejun Heo * While DISASSOCIATED, the cpu may be offline and all workers have 60bc2ae0f5STejun Heo * %WORKER_UNBOUND set and concurrency management disabled, and may 6124647570STejun Heo * be executing on any CPU. The pool behaves as an unbound one. 62bc2ae0f5STejun Heo * 63bc2ae0f5STejun Heo * Note that DISASSOCIATED can be flipped only while holding 6424647570STejun Heo * assoc_mutex to avoid changing binding state while 6524647570STejun Heo * create_worker() is in progress.
66bc2ae0f5STejun Heo */ 6711ebea50STejun Heo POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ 6824647570STejun Heo POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ 6935b6bb63STejun Heo POOL_FREEZING = 1 << 3, /* freeze in progress */ 70db7bccf4STejun Heo 71c8e55f36STejun Heo /* worker flags */ 72c8e55f36STejun Heo WORKER_STARTED = 1 << 0, /* started */ 73c8e55f36STejun Heo WORKER_DIE = 1 << 1, /* die die die */ 74c8e55f36STejun Heo WORKER_IDLE = 1 << 2, /* is idle */ 75e22bee78STejun Heo WORKER_PREP = 1 << 3, /* preparing to run works */ 76fb0e7bebSTejun Heo WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ 77f3421797STejun Heo WORKER_UNBOUND = 1 << 7, /* worker is unbound */ 78e22bee78STejun Heo 795f7dabfdSLai Jiangshan WORKER_NOT_RUNNING = WORKER_PREP | WORKER_UNBOUND | 80403c821dSTejun Heo WORKER_CPU_INTENSIVE, 81db7bccf4STejun Heo 82e34cdddbSTejun Heo NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ 834ce62e9eSTejun Heo 8429c91e99STejun Heo UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */ 85c8e55f36STejun Heo BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ 86db7bccf4STejun Heo 87e22bee78STejun Heo MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ 88e22bee78STejun Heo IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ 89e22bee78STejun Heo 903233cdbdSTejun Heo MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, 913233cdbdSTejun Heo /* call for help after 10ms 923233cdbdSTejun Heo (min two ticks) */ 93e22bee78STejun Heo MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ 94e22bee78STejun Heo CREATE_COOLDOWN = HZ, /* time to breathe after fail */ 951da177e4SLinus Torvalds 961da177e4SLinus Torvalds /* 97e22bee78STejun Heo * Rescue workers are used only in emergencies and shared by 98e22bee78STejun Heo * all cpus. Give -20. 99e22bee78STejun Heo */ 100e22bee78STejun Heo RESCUER_NICE_LEVEL = -20, 1013270476aSTejun Heo HIGHPRI_NICE_LEVEL = -20, 102c8e55f36STejun Heo }; 103c8e55f36STejun Heo 1041da177e4SLinus Torvalds /* 1054690c4abSTejun Heo * Structure fields follow one of the following exclusion rules. 1064690c4abSTejun Heo * 107e41e704bSTejun Heo * I: Modifiable by initialization/destruction paths and read-only for 108e41e704bSTejun Heo * everyone else. 1094690c4abSTejun Heo * 110e22bee78STejun Heo * P: Preemption protected. Disabling preemption is enough and should 111e22bee78STejun Heo * only be modified and accessed from the local cpu. 112e22bee78STejun Heo * 113d565ed63STejun Heo * L: pool->lock protected. Access with pool->lock held. 1144690c4abSTejun Heo * 115d565ed63STejun Heo * X: During normal operation, modification requires pool->lock and should 116d565ed63STejun Heo * be done only from local cpu. Either disabling preemption on local 117d565ed63STejun Heo * cpu or grabbing pool->lock is enough for read access. If 118d565ed63STejun Heo * POOL_DISASSOCIATED is set, it's identical to L. 119e22bee78STejun Heo * 12073f53c4aSTejun Heo * F: wq->flush_mutex protected. 12173f53c4aSTejun Heo * 1224690c4abSTejun Heo * W: workqueue_lock protected. 12376af4d93STejun Heo * 12476af4d93STejun Heo * R: workqueue_lock protected for writes. Sched-RCU protected for reads. 12575ccf595STejun Heo * 12675ccf595STejun Heo * FR: wq->flush_mutex and workqueue_lock protected for writes. Sched-RCU
1284690c4abSTejun Heo */ 1294690c4abSTejun Heo 1302eaebdb3STejun Heo /* struct worker is defined in workqueue_internal.h */ 131c34056a3STejun Heo 132bd7bdd43STejun Heo struct worker_pool { 133d565ed63STejun Heo spinlock_t lock; /* the pool lock */ 134d84ff051STejun Heo int cpu; /* I: the associated cpu */ 1359daf9e67STejun Heo int id; /* I: pool ID */ 13611ebea50STejun Heo unsigned int flags; /* X: flags */ 137bd7bdd43STejun Heo 138bd7bdd43STejun Heo struct list_head worklist; /* L: list of pending works */ 139bd7bdd43STejun Heo int nr_workers; /* L: total number of workers */ 140ea1abd61SLai Jiangshan 141ea1abd61SLai Jiangshan /* nr_idle includes the ones off idle_list for rebinding */ 142bd7bdd43STejun Heo int nr_idle; /* L: currently idle ones */ 143bd7bdd43STejun Heo 144bd7bdd43STejun Heo struct list_head idle_list; /* X: list of idle workers */ 145bd7bdd43STejun Heo struct timer_list idle_timer; /* L: worker idle timeout */ 146bd7bdd43STejun Heo struct timer_list mayday_timer; /* L: SOS timer for workers */ 147bd7bdd43STejun Heo 148c9e7cf27STejun Heo /* workers are chained either in busy_hash or idle_list */ 149c9e7cf27STejun Heo DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER); 150c9e7cf27STejun Heo /* L: hash of busy workers */ 151c9e7cf27STejun Heo 15234a06bd6STejun Heo struct mutex manager_arb; /* manager arbitration */ 15324647570STejun Heo struct mutex assoc_mutex; /* protect POOL_DISASSOCIATED */ 154bd7bdd43STejun Heo struct ida worker_ida; /* L: for worker IDs */ 155e19e397aSTejun Heo 1567a4e344cSTejun Heo struct workqueue_attrs *attrs; /* I: worker attributes */ 15729c91e99STejun Heo struct hlist_node hash_node; /* R: unbound_pool_hash node */ 15829c91e99STejun Heo int refcnt; /* refcnt for unbound pools */ 1597a4e344cSTejun Heo 160e19e397aSTejun Heo /* 161e19e397aSTejun Heo * The current concurrency level. As it's likely to be accessed 162e19e397aSTejun Heo * from other CPUs during try_to_wake_up(), put it in a separate 163e19e397aSTejun Heo * cacheline. 164e19e397aSTejun Heo */ 165e19e397aSTejun Heo atomic_t nr_running ____cacheline_aligned_in_smp; 16629c91e99STejun Heo 16729c91e99STejun Heo /* 16829c91e99STejun Heo * Destruction of pool is sched-RCU protected to allow dereferences 16929c91e99STejun Heo * from get_work_pool(). 17029c91e99STejun Heo */ 17129c91e99STejun Heo struct rcu_head rcu; 1728b03ae3cSTejun Heo } ____cacheline_aligned_in_smp; 1738b03ae3cSTejun Heo 1748b03ae3cSTejun Heo /* 175112202d9STejun Heo * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS 176112202d9STejun Heo * of work_struct->data are used for flags and the remaining high bits 177112202d9STejun Heo * point to the pwq; thus, pwqs need to be aligned at two's power of the 178112202d9STejun Heo * number of flag bits. 
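 *
 * For illustration, a minimal sketch of decoding such a data word while a
 * work item is queued (it simply mirrors get_work_pwq() below; the mask and
 * shift names are the ones this file already uses):
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *	struct pool_workqueue *pwq = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	unsigned long flag_bits = data & ((1UL << WORK_STRUCT_FLAG_BITS) - 1);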
1791da177e4SLinus Torvalds */ 180112202d9STejun Heo struct pool_workqueue { 181bd7bdd43STejun Heo struct worker_pool *pool; /* I: the associated pool */ 1824690c4abSTejun Heo struct workqueue_struct *wq; /* I: the owning workqueue */ 18373f53c4aSTejun Heo int work_color; /* L: current color */ 18473f53c4aSTejun Heo int flush_color; /* L: flushing color */ 1858864b4e5STejun Heo int refcnt; /* L: reference count */ 18673f53c4aSTejun Heo int nr_in_flight[WORK_NR_COLORS]; 18773f53c4aSTejun Heo /* L: nr of in_flight works */ 1881e19ffc6STejun Heo int nr_active; /* L: nr of active works */ 189a0a1a5fdSTejun Heo int max_active; /* L: max active works */ 1901e19ffc6STejun Heo struct list_head delayed_works; /* L: delayed works */ 19175ccf595STejun Heo struct list_head pwqs_node; /* FR: node on wq->pwqs */ 192493a1724STejun Heo struct list_head mayday_node; /* W: node on wq->maydays */ 1938864b4e5STejun Heo 1948864b4e5STejun Heo /* 1958864b4e5STejun Heo * Release of unbound pwq is punted to system_wq. See put_pwq() 1968864b4e5STejun Heo * and pwq_unbound_release_workfn() for details. pool_workqueue 1978864b4e5STejun Heo * itself is also sched-RCU protected so that the first pwq can be 1988864b4e5STejun Heo * determined without grabbing workqueue_lock. 1998864b4e5STejun Heo */ 2008864b4e5STejun Heo struct work_struct unbound_release_work; 2018864b4e5STejun Heo struct rcu_head rcu; 202e904e6c2STejun Heo } __aligned(1 << WORK_STRUCT_FLAG_BITS); 2031da177e4SLinus Torvalds 2041da177e4SLinus Torvalds /* 20573f53c4aSTejun Heo * Structure used to wait for workqueue flush. 20673f53c4aSTejun Heo */ 20773f53c4aSTejun Heo struct wq_flusher { 20873f53c4aSTejun Heo struct list_head list; /* F: list of flushers */ 20973f53c4aSTejun Heo int flush_color; /* F: flush color waiting for */ 21073f53c4aSTejun Heo struct completion done; /* flush completion */ 21173f53c4aSTejun Heo }; 2121da177e4SLinus Torvalds 21373f53c4aSTejun Heo /* 2141da177e4SLinus Torvalds * The externally visible workqueue abstraction is an array of 2151da177e4SLinus Torvalds * per-CPU workqueues: 2161da177e4SLinus Torvalds */ 2171da177e4SLinus Torvalds struct workqueue_struct { 2189c5a2ba7STejun Heo unsigned int flags; /* W: WQ_* flags */ 219420c0ddbSTejun Heo struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */ 22075ccf595STejun Heo struct list_head pwqs; /* FR: all pwqs of this wq */ 2214690c4abSTejun Heo struct list_head list; /* W: list of all workqueues */ 22273f53c4aSTejun Heo 22373f53c4aSTejun Heo struct mutex flush_mutex; /* protects wq flushing */ 22473f53c4aSTejun Heo int work_color; /* F: current work color */ 22573f53c4aSTejun Heo int flush_color; /* F: current flush color */ 226112202d9STejun Heo atomic_t nr_pwqs_to_flush; /* flush in progress */ 22773f53c4aSTejun Heo struct wq_flusher *first_flusher; /* F: first flusher */ 22873f53c4aSTejun Heo struct list_head flusher_queue; /* F: flush waiters */ 22973f53c4aSTejun Heo struct list_head flusher_overflow; /* F: flush overflow list */ 23073f53c4aSTejun Heo 231493a1724STejun Heo struct list_head maydays; /* W: pwqs requesting rescue */ 232e22bee78STejun Heo struct worker *rescuer; /* I: rescue worker */ 233e22bee78STejun Heo 2349c5a2ba7STejun Heo int nr_drainers; /* W: drain in progress */ 235112202d9STejun Heo int saved_max_active; /* W: saved pwq max_active */ 2364e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP 2374e6045f1SJohannes Berg struct lockdep_map lockdep_map; 2384e6045f1SJohannes Berg #endif 239b196be89STejun Heo char name[]; /* I: workqueue name */ 2401da177e4SLinus 
Torvalds }; 2411da177e4SLinus Torvalds 242e904e6c2STejun Heo static struct kmem_cache *pwq_cache; 243e904e6c2STejun Heo 24429c91e99STejun Heo /* hash of all unbound pools keyed by pool->attrs */ 24529c91e99STejun Heo static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); 24629c91e99STejun Heo 24729c91e99STejun Heo static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; 24829c91e99STejun Heo 249d320c038STejun Heo struct workqueue_struct *system_wq __read_mostly; 250d320c038STejun Heo EXPORT_SYMBOL_GPL(system_wq); 251044c782cSValentin Ilie struct workqueue_struct *system_highpri_wq __read_mostly; 2521aabe902SJoonsoo Kim EXPORT_SYMBOL_GPL(system_highpri_wq); 253044c782cSValentin Ilie struct workqueue_struct *system_long_wq __read_mostly; 254d320c038STejun Heo EXPORT_SYMBOL_GPL(system_long_wq); 255044c782cSValentin Ilie struct workqueue_struct *system_unbound_wq __read_mostly; 256f3421797STejun Heo EXPORT_SYMBOL_GPL(system_unbound_wq); 257044c782cSValentin Ilie struct workqueue_struct *system_freezable_wq __read_mostly; 25824d51addSTejun Heo EXPORT_SYMBOL_GPL(system_freezable_wq); 259d320c038STejun Heo 26097bd2347STejun Heo #define CREATE_TRACE_POINTS 26197bd2347STejun Heo #include <trace/events/workqueue.h> 26297bd2347STejun Heo 26376af4d93STejun Heo #define assert_rcu_or_wq_lock() \ 26476af4d93STejun Heo rcu_lockdep_assert(rcu_read_lock_sched_held() || \ 26576af4d93STejun Heo lockdep_is_held(&workqueue_lock), \ 26676af4d93STejun Heo "sched RCU or workqueue lock should be held") 26776af4d93STejun Heo 268f02ae73aSTejun Heo #define for_each_cpu_worker_pool(pool, cpu) \ 269f02ae73aSTejun Heo for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ 270f02ae73aSTejun Heo (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ 2717a62c2c8STejun Heo (pool)++) 2724ce62e9eSTejun Heo 273b67bfe0dSSasha Levin #define for_each_busy_worker(worker, i, pool) \ 274b67bfe0dSSasha Levin hash_for_each(pool->busy_hash, i, worker, hentry) 275db7bccf4STejun Heo 27649e3cf44STejun Heo /** 27717116969STejun Heo * for_each_pool - iterate through all worker_pools in the system 27817116969STejun Heo * @pool: iteration cursor 27917116969STejun Heo * @id: integer used for iteration 280fa1b54e6STejun Heo * 281fa1b54e6STejun Heo * This must be called either with workqueue_lock held or sched RCU read 282fa1b54e6STejun Heo * locked. If the pool needs to be used beyond the locking in effect, the 283fa1b54e6STejun Heo * caller is responsible for guaranteeing that the pool stays online. 284fa1b54e6STejun Heo * 285fa1b54e6STejun Heo * The if/else clause exists only for the lockdep assertion and can be 286fa1b54e6STejun Heo * ignored. 28717116969STejun Heo */ 28817116969STejun Heo #define for_each_pool(pool, id) \ 289fa1b54e6STejun Heo idr_for_each_entry(&worker_pool_idr, pool, id) \ 290fa1b54e6STejun Heo if (({ assert_rcu_or_wq_lock(); false; })) { } \ 291fa1b54e6STejun Heo else 29217116969STejun Heo 29317116969STejun Heo /** 29449e3cf44STejun Heo * for_each_pwq - iterate through all pool_workqueues of the specified workqueue 29549e3cf44STejun Heo * @pwq: iteration cursor 29649e3cf44STejun Heo * @wq: the target workqueue 29776af4d93STejun Heo * 29876af4d93STejun Heo * This must be called either with workqueue_lock held or sched RCU read 29976af4d93STejun Heo * locked. If the pwq needs to be used beyond the locking in effect, the 30076af4d93STejun Heo * caller is responsible for guaranteeing that the pwq stays online. 
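 *
 * A minimal usage sketch (illustrative only; it assumes the caller merely
 * counts pwqs and does not use them past the loop, so sched-RCU read
 * locking alone is sufficient):
 *
 *	int n = 0;
 *
 *	rcu_read_lock_sched();
 *	for_each_pwq(pwq, wq)
 *		n++;
 *	rcu_read_unlock_sched();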
30176af4d93STejun Heo * 30276af4d93STejun Heo * The if/else clause exists only for the lockdep assertion and can be 30376af4d93STejun Heo * ignored. 30449e3cf44STejun Heo */ 30549e3cf44STejun Heo #define for_each_pwq(pwq, wq) \ 30676af4d93STejun Heo list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \ 30776af4d93STejun Heo if (({ assert_rcu_or_wq_lock(); false; })) { } \ 30876af4d93STejun Heo else 309f3421797STejun Heo 310dc186ad7SThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_WORK 311dc186ad7SThomas Gleixner 312dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr; 313dc186ad7SThomas Gleixner 31499777288SStanislaw Gruszka static void *work_debug_hint(void *addr) 31599777288SStanislaw Gruszka { 31699777288SStanislaw Gruszka return ((struct work_struct *) addr)->func; 31799777288SStanislaw Gruszka } 31899777288SStanislaw Gruszka 319dc186ad7SThomas Gleixner /* 320dc186ad7SThomas Gleixner * fixup_init is called when: 321dc186ad7SThomas Gleixner * - an active object is initialized 322dc186ad7SThomas Gleixner */ 323dc186ad7SThomas Gleixner static int work_fixup_init(void *addr, enum debug_obj_state state) 324dc186ad7SThomas Gleixner { 325dc186ad7SThomas Gleixner struct work_struct *work = addr; 326dc186ad7SThomas Gleixner 327dc186ad7SThomas Gleixner switch (state) { 328dc186ad7SThomas Gleixner case ODEBUG_STATE_ACTIVE: 329dc186ad7SThomas Gleixner cancel_work_sync(work); 330dc186ad7SThomas Gleixner debug_object_init(work, &work_debug_descr); 331dc186ad7SThomas Gleixner return 1; 332dc186ad7SThomas Gleixner default: 333dc186ad7SThomas Gleixner return 0; 334dc186ad7SThomas Gleixner } 335dc186ad7SThomas Gleixner } 336dc186ad7SThomas Gleixner 337dc186ad7SThomas Gleixner /* 338dc186ad7SThomas Gleixner * fixup_activate is called when: 339dc186ad7SThomas Gleixner * - an active object is activated 340dc186ad7SThomas Gleixner * - an unknown object is activated (might be a statically initialized object) 341dc186ad7SThomas Gleixner */ 342dc186ad7SThomas Gleixner static int work_fixup_activate(void *addr, enum debug_obj_state state) 343dc186ad7SThomas Gleixner { 344dc186ad7SThomas Gleixner struct work_struct *work = addr; 345dc186ad7SThomas Gleixner 346dc186ad7SThomas Gleixner switch (state) { 347dc186ad7SThomas Gleixner 348dc186ad7SThomas Gleixner case ODEBUG_STATE_NOTAVAILABLE: 349dc186ad7SThomas Gleixner /* 350dc186ad7SThomas Gleixner * This is not really a fixup. The work struct was 351dc186ad7SThomas Gleixner * statically initialized. We just make sure that it 352dc186ad7SThomas Gleixner * is tracked in the object tracker. 
353dc186ad7SThomas Gleixner */ 35422df02bbSTejun Heo if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { 355dc186ad7SThomas Gleixner debug_object_init(work, &work_debug_descr); 356dc186ad7SThomas Gleixner debug_object_activate(work, &work_debug_descr); 357dc186ad7SThomas Gleixner return 0; 358dc186ad7SThomas Gleixner } 359dc186ad7SThomas Gleixner WARN_ON_ONCE(1); 360dc186ad7SThomas Gleixner return 0; 361dc186ad7SThomas Gleixner 362dc186ad7SThomas Gleixner case ODEBUG_STATE_ACTIVE: 363dc186ad7SThomas Gleixner WARN_ON(1); 364dc186ad7SThomas Gleixner 365dc186ad7SThomas Gleixner default: 366dc186ad7SThomas Gleixner return 0; 367dc186ad7SThomas Gleixner } 368dc186ad7SThomas Gleixner } 369dc186ad7SThomas Gleixner 370dc186ad7SThomas Gleixner /* 371dc186ad7SThomas Gleixner * fixup_free is called when: 372dc186ad7SThomas Gleixner * - an active object is freed 373dc186ad7SThomas Gleixner */ 374dc186ad7SThomas Gleixner static int work_fixup_free(void *addr, enum debug_obj_state state) 375dc186ad7SThomas Gleixner { 376dc186ad7SThomas Gleixner struct work_struct *work = addr; 377dc186ad7SThomas Gleixner 378dc186ad7SThomas Gleixner switch (state) { 379dc186ad7SThomas Gleixner case ODEBUG_STATE_ACTIVE: 380dc186ad7SThomas Gleixner cancel_work_sync(work); 381dc186ad7SThomas Gleixner debug_object_free(work, &work_debug_descr); 382dc186ad7SThomas Gleixner return 1; 383dc186ad7SThomas Gleixner default: 384dc186ad7SThomas Gleixner return 0; 385dc186ad7SThomas Gleixner } 386dc186ad7SThomas Gleixner } 387dc186ad7SThomas Gleixner 388dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr = { 389dc186ad7SThomas Gleixner .name = "work_struct", 39099777288SStanislaw Gruszka .debug_hint = work_debug_hint, 391dc186ad7SThomas Gleixner .fixup_init = work_fixup_init, 392dc186ad7SThomas Gleixner .fixup_activate = work_fixup_activate, 393dc186ad7SThomas Gleixner .fixup_free = work_fixup_free, 394dc186ad7SThomas Gleixner }; 395dc186ad7SThomas Gleixner 396dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) 397dc186ad7SThomas Gleixner { 398dc186ad7SThomas Gleixner debug_object_activate(work, &work_debug_descr); 399dc186ad7SThomas Gleixner } 400dc186ad7SThomas Gleixner 401dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) 402dc186ad7SThomas Gleixner { 403dc186ad7SThomas Gleixner debug_object_deactivate(work, &work_debug_descr); 404dc186ad7SThomas Gleixner } 405dc186ad7SThomas Gleixner 406dc186ad7SThomas Gleixner void __init_work(struct work_struct *work, int onstack) 407dc186ad7SThomas Gleixner { 408dc186ad7SThomas Gleixner if (onstack) 409dc186ad7SThomas Gleixner debug_object_init_on_stack(work, &work_debug_descr); 410dc186ad7SThomas Gleixner else 411dc186ad7SThomas Gleixner debug_object_init(work, &work_debug_descr); 412dc186ad7SThomas Gleixner } 413dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(__init_work); 414dc186ad7SThomas Gleixner 415dc186ad7SThomas Gleixner void destroy_work_on_stack(struct work_struct *work) 416dc186ad7SThomas Gleixner { 417dc186ad7SThomas Gleixner debug_object_free(work, &work_debug_descr); 418dc186ad7SThomas Gleixner } 419dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_work_on_stack); 420dc186ad7SThomas Gleixner 421dc186ad7SThomas Gleixner #else 422dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) { } 423dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) { } 424dc186ad7SThomas Gleixner #endif 425dc186ad7SThomas 
Gleixner 42695402b38SGautham R Shenoy /* Serializes the accesses to the list of workqueues. */ 42795402b38SGautham R Shenoy static DEFINE_SPINLOCK(workqueue_lock); 4281da177e4SLinus Torvalds static LIST_HEAD(workqueues); 429a0a1a5fdSTejun Heo static bool workqueue_freezing; /* W: have wqs started freezing? */ 4301da177e4SLinus Torvalds 43114441960SOleg Nesterov /* 432e19e397aSTejun Heo * The CPU and unbound standard worker pools. The unbound ones have 433e19e397aSTejun Heo * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set. 43414441960SOleg Nesterov */ 435e19e397aSTejun Heo static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], 436f02ae73aSTejun Heo cpu_worker_pools); 437f3421797STejun Heo 438fa1b54e6STejun Heo /* 439fa1b54e6STejun Heo * idr of all pools. Modifications are protected by workqueue_lock. Read 440fa1b54e6STejun Heo * accesses are sched-RCU protected. 441fa1b54e6STejun Heo */ 4429daf9e67STejun Heo static DEFINE_IDR(worker_pool_idr); 4439daf9e67STejun Heo 444c34056a3STejun Heo static int worker_thread(void *__worker); 4451da177e4SLinus Torvalds 4469daf9e67STejun Heo /* allocate ID and assign it to @pool */ 4479daf9e67STejun Heo static int worker_pool_assign_id(struct worker_pool *pool) 4489daf9e67STejun Heo { 4499daf9e67STejun Heo int ret; 4509daf9e67STejun Heo 451fa1b54e6STejun Heo do { 452fa1b54e6STejun Heo if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL)) 453fa1b54e6STejun Heo return -ENOMEM; 454fa1b54e6STejun Heo 455fa1b54e6STejun Heo spin_lock_irq(&workqueue_lock); 4569daf9e67STejun Heo ret = idr_get_new(&worker_pool_idr, pool, &pool->id); 457fa1b54e6STejun Heo spin_unlock_irq(&workqueue_lock); 458fa1b54e6STejun Heo } while (ret == -EAGAIN); 4599daf9e67STejun Heo 4609daf9e67STejun Heo return ret; 4619daf9e67STejun Heo } 4629daf9e67STejun Heo 46376af4d93STejun Heo /** 46476af4d93STejun Heo * first_pwq - return the first pool_workqueue of the specified workqueue 46576af4d93STejun Heo * @wq: the target workqueue 46676af4d93STejun Heo * 46776af4d93STejun Heo * This must be called either with workqueue_lock held or sched RCU read 46876af4d93STejun Heo * locked. If the pwq needs to be used beyond the locking in effect, the 46976af4d93STejun Heo * caller is responsible for guaranteeing that the pwq stays online. 47076af4d93STejun Heo */ 4717fb98ea7STejun Heo static struct pool_workqueue *first_pwq(struct workqueue_struct *wq) 472a848e3b6SOleg Nesterov { 47376af4d93STejun Heo assert_rcu_or_wq_lock(); 47476af4d93STejun Heo return list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue, 47576af4d93STejun Heo pwqs_node); 476f3421797STejun Heo } 477a848e3b6SOleg Nesterov 47873f53c4aSTejun Heo static unsigned int work_color_to_flags(int color) 47973f53c4aSTejun Heo { 48073f53c4aSTejun Heo return color << WORK_STRUCT_COLOR_SHIFT; 48173f53c4aSTejun Heo } 48273f53c4aSTejun Heo 48373f53c4aSTejun Heo static int get_work_color(struct work_struct *work) 48473f53c4aSTejun Heo { 48573f53c4aSTejun Heo return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & 48673f53c4aSTejun Heo ((1 << WORK_STRUCT_COLOR_BITS) - 1); 48773f53c4aSTejun Heo } 48873f53c4aSTejun Heo 48973f53c4aSTejun Heo static int work_next_color(int color) 49073f53c4aSTejun Heo { 49173f53c4aSTejun Heo return (color + 1) % WORK_NR_COLORS; 492b1f4ec17SOleg Nesterov } 493b1f4ec17SOleg Nesterov 4944594bf15SDavid Howells /* 495112202d9STejun Heo * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's data 496112202d9STejun Heo * contain the pointer to the queued pwq.
Once execution starts, the flag 4977c3eed5cSTejun Heo * is cleared and the high bits contain OFFQ flags and pool ID. 4987a22ad75STejun Heo * 499112202d9STejun Heo * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling() 500112202d9STejun Heo * and clear_work_data() can be used to set the pwq, pool or clear 501bbb68dfaSTejun Heo * work->data. These functions should only be called while the work is 502bbb68dfaSTejun Heo * owned - ie. while the PENDING bit is set. 5037a22ad75STejun Heo * 504112202d9STejun Heo * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq 5057c3eed5cSTejun Heo * corresponding to a work. Pool is available once the work has been 506112202d9STejun Heo * queued anywhere after initialization until it is sync canceled. pwq is 5077c3eed5cSTejun Heo * available only while the work item is queued. 508bbb68dfaSTejun Heo * 509bbb68dfaSTejun Heo * %WORK_OFFQ_CANCELING is used to mark a work item which is being 510bbb68dfaSTejun Heo * canceled. While being canceled, a work item may have its PENDING set 511bbb68dfaSTejun Heo * but stay off timer and worklist for arbitrarily long and nobody should 512bbb68dfaSTejun Heo * try to steal the PENDING bit. 5134594bf15SDavid Howells */ 5147a22ad75STejun Heo static inline void set_work_data(struct work_struct *work, unsigned long data, 5157a22ad75STejun Heo unsigned long flags) 5167a22ad75STejun Heo { 5176183c009STejun Heo WARN_ON_ONCE(!work_pending(work)); 5187a22ad75STejun Heo atomic_long_set(&work->data, data | flags | work_static(work)); 5197a22ad75STejun Heo } 5207a22ad75STejun Heo 521112202d9STejun Heo static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, 5224690c4abSTejun Heo unsigned long extra_flags) 523365970a1SDavid Howells { 524112202d9STejun Heo set_work_data(work, (unsigned long)pwq, 525112202d9STejun Heo WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags); 526365970a1SDavid Howells } 527365970a1SDavid Howells 5284468a00fSLai Jiangshan static void set_work_pool_and_keep_pending(struct work_struct *work, 5294468a00fSLai Jiangshan int pool_id) 5304468a00fSLai Jiangshan { 5314468a00fSLai Jiangshan set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 5324468a00fSLai Jiangshan WORK_STRUCT_PENDING); 5334468a00fSLai Jiangshan } 5344468a00fSLai Jiangshan 5357c3eed5cSTejun Heo static void set_work_pool_and_clear_pending(struct work_struct *work, 5367c3eed5cSTejun Heo int pool_id) 5374d707b9fSOleg Nesterov { 53823657bb1STejun Heo /* 53923657bb1STejun Heo * The following wmb is paired with the implied mb in 54023657bb1STejun Heo * test_and_set_bit(PENDING) and ensures all updates to @work made 54123657bb1STejun Heo * here are visible to and precede any updates by the next PENDING 54223657bb1STejun Heo * owner. 
54323657bb1STejun Heo */ 54423657bb1STejun Heo smp_wmb(); 5457c3eed5cSTejun Heo set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 5464d707b9fSOleg Nesterov } 5474d707b9fSOleg Nesterov 5487a22ad75STejun Heo static void clear_work_data(struct work_struct *work) 549365970a1SDavid Howells { 5507c3eed5cSTejun Heo smp_wmb(); /* see set_work_pool_and_clear_pending() */ 5517c3eed5cSTejun Heo set_work_data(work, WORK_STRUCT_NO_POOL, 0); 5527a22ad75STejun Heo } 5537a22ad75STejun Heo 554112202d9STejun Heo static struct pool_workqueue *get_work_pwq(struct work_struct *work) 5557a22ad75STejun Heo { 556e120153dSTejun Heo unsigned long data = atomic_long_read(&work->data); 5577a22ad75STejun Heo 558112202d9STejun Heo if (data & WORK_STRUCT_PWQ) 559e120153dSTejun Heo return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 560e120153dSTejun Heo else 561e120153dSTejun Heo return NULL; 5627a22ad75STejun Heo } 5637a22ad75STejun Heo 5647c3eed5cSTejun Heo /** 5657c3eed5cSTejun Heo * get_work_pool - return the worker_pool a given work was associated with 5667c3eed5cSTejun Heo * @work: the work item of interest 5677c3eed5cSTejun Heo * 5687c3eed5cSTejun Heo * Return the worker_pool @work was last associated with. %NULL if none. 569fa1b54e6STejun Heo * 570fa1b54e6STejun Heo * Pools are created and destroyed under workqueue_lock, and allow read 571fa1b54e6STejun Heo * access under sched-RCU read lock. As such, this function should be 572fa1b54e6STejun Heo * called under workqueue_lock or with preemption disabled. 573fa1b54e6STejun Heo * 574fa1b54e6STejun Heo * All fields of the returned pool are accessible as long as the above 575fa1b54e6STejun Heo * mentioned locking is in effect. If the returned pool needs to be used 576fa1b54e6STejun Heo * beyond the critical section, the caller is responsible for ensuring the 577fa1b54e6STejun Heo * returned pool is and stays online. 5787c3eed5cSTejun Heo */ 5797c3eed5cSTejun Heo static struct worker_pool *get_work_pool(struct work_struct *work) 5807a22ad75STejun Heo { 581e120153dSTejun Heo unsigned long data = atomic_long_read(&work->data); 5827c3eed5cSTejun Heo int pool_id; 5837a22ad75STejun Heo 584fa1b54e6STejun Heo assert_rcu_or_wq_lock(); 585fa1b54e6STejun Heo 586112202d9STejun Heo if (data & WORK_STRUCT_PWQ) 587112202d9STejun Heo return ((struct pool_workqueue *) 5887c3eed5cSTejun Heo (data & WORK_STRUCT_WQ_DATA_MASK))->pool; 5897a22ad75STejun Heo 5907c3eed5cSTejun Heo pool_id = data >> WORK_OFFQ_POOL_SHIFT; 5917c3eed5cSTejun Heo if (pool_id == WORK_OFFQ_POOL_NONE) 5927a22ad75STejun Heo return NULL; 5937a22ad75STejun Heo 594fa1b54e6STejun Heo return idr_find(&worker_pool_idr, pool_id); 5957c3eed5cSTejun Heo } 5967c3eed5cSTejun Heo 5977c3eed5cSTejun Heo /** 5987c3eed5cSTejun Heo * get_work_pool_id - return the worker pool ID a given work is associated with 5997c3eed5cSTejun Heo * @work: the work item of interest 6007c3eed5cSTejun Heo * 6017c3eed5cSTejun Heo * Return the worker_pool ID @work was last associated with. 6027c3eed5cSTejun Heo * %WORK_OFFQ_POOL_NONE if none.
6037c3eed5cSTejun Heo */ 6047c3eed5cSTejun Heo static int get_work_pool_id(struct work_struct *work) 6057c3eed5cSTejun Heo { 60654d5b7d0SLai Jiangshan unsigned long data = atomic_long_read(&work->data); 6077c3eed5cSTejun Heo 608112202d9STejun Heo if (data & WORK_STRUCT_PWQ) 609112202d9STejun Heo return ((struct pool_workqueue *) 61054d5b7d0SLai Jiangshan (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; 61154d5b7d0SLai Jiangshan 61254d5b7d0SLai Jiangshan return data >> WORK_OFFQ_POOL_SHIFT; 6137c3eed5cSTejun Heo } 6147c3eed5cSTejun Heo 615bbb68dfaSTejun Heo static void mark_work_canceling(struct work_struct *work) 616bbb68dfaSTejun Heo { 6177c3eed5cSTejun Heo unsigned long pool_id = get_work_pool_id(work); 618bbb68dfaSTejun Heo 6197c3eed5cSTejun Heo pool_id <<= WORK_OFFQ_POOL_SHIFT; 6207c3eed5cSTejun Heo set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); 621bbb68dfaSTejun Heo } 622bbb68dfaSTejun Heo 623bbb68dfaSTejun Heo static bool work_is_canceling(struct work_struct *work) 624bbb68dfaSTejun Heo { 625bbb68dfaSTejun Heo unsigned long data = atomic_long_read(&work->data); 626bbb68dfaSTejun Heo 627112202d9STejun Heo return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 628bbb68dfaSTejun Heo } 629bbb68dfaSTejun Heo 630e22bee78STejun Heo /* 6313270476aSTejun Heo * Policy functions. These define the policies on how the global worker 6323270476aSTejun Heo * pools are managed. Unless noted otherwise, these functions assume that 633d565ed63STejun Heo * they're being called with pool->lock held. 634e22bee78STejun Heo */ 635e22bee78STejun Heo 63663d95a91STejun Heo static bool __need_more_worker(struct worker_pool *pool) 637649027d7STejun Heo { 638e19e397aSTejun Heo return !atomic_read(&pool->nr_running); 639649027d7STejun Heo } 640649027d7STejun Heo 641e22bee78STejun Heo /* 642e22bee78STejun Heo * Need to wake up a worker? Called from anything but currently 643e22bee78STejun Heo * running workers. 644974271c4STejun Heo * 645974271c4STejun Heo * Note that, because unbound workers never contribute to nr_running, this 646706026c2STejun Heo * function will always return %true for unbound pools as long as the 647974271c4STejun Heo * worklist isn't empty. 648e22bee78STejun Heo */ 64963d95a91STejun Heo static bool need_more_worker(struct worker_pool *pool) 650e22bee78STejun Heo { 65163d95a91STejun Heo return !list_empty(&pool->worklist) && __need_more_worker(pool); 652e22bee78STejun Heo } 653e22bee78STejun Heo 654e22bee78STejun Heo /* Can I start working? Called from busy but !running workers. */ 65563d95a91STejun Heo static bool may_start_working(struct worker_pool *pool) 656e22bee78STejun Heo { 65763d95a91STejun Heo return pool->nr_idle; 658e22bee78STejun Heo } 659e22bee78STejun Heo 660e22bee78STejun Heo /* Do I need to keep working? Called from currently running workers. */ 66163d95a91STejun Heo static bool keep_working(struct worker_pool *pool) 662e22bee78STejun Heo { 663e19e397aSTejun Heo return !list_empty(&pool->worklist) && 664e19e397aSTejun Heo atomic_read(&pool->nr_running) <= 1; 665e22bee78STejun Heo } 666e22bee78STejun Heo 667e22bee78STejun Heo /* Do we need a new worker? Called from manager. */ 66863d95a91STejun Heo static bool need_to_create_worker(struct worker_pool *pool) 669e22bee78STejun Heo { 67063d95a91STejun Heo return need_more_worker(pool) && !may_start_working(pool); 671e22bee78STejun Heo } 672e22bee78STejun Heo 673e22bee78STejun Heo /* Do I need to be the manager? 
*/ 67463d95a91STejun Heo static bool need_to_manage_workers(struct worker_pool *pool) 675e22bee78STejun Heo { 67663d95a91STejun Heo return need_to_create_worker(pool) || 67711ebea50STejun Heo (pool->flags & POOL_MANAGE_WORKERS); 678e22bee78STejun Heo } 679e22bee78STejun Heo 680e22bee78STejun Heo /* Do we have too many workers and should some go away? */ 68163d95a91STejun Heo static bool too_many_workers(struct worker_pool *pool) 682e22bee78STejun Heo { 68334a06bd6STejun Heo bool managing = mutex_is_locked(&pool->manager_arb); 68463d95a91STejun Heo int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 68563d95a91STejun Heo int nr_busy = pool->nr_workers - nr_idle; 686e22bee78STejun Heo 687ea1abd61SLai Jiangshan /* 688ea1abd61SLai Jiangshan * nr_idle and idle_list may disagree if idle rebinding is in 689ea1abd61SLai Jiangshan * progress. Never return %true if idle_list is empty. 690ea1abd61SLai Jiangshan */ 691ea1abd61SLai Jiangshan if (list_empty(&pool->idle_list)) 692ea1abd61SLai Jiangshan return false; 693ea1abd61SLai Jiangshan 694e22bee78STejun Heo return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 695e22bee78STejun Heo } 696e22bee78STejun Heo 697e22bee78STejun Heo /* 698e22bee78STejun Heo * Wake up functions. 699e22bee78STejun Heo */ 700e22bee78STejun Heo 7017e11629dSTejun Heo /* Return the first worker. Safe with preemption disabled */ 70263d95a91STejun Heo static struct worker *first_worker(struct worker_pool *pool) 7037e11629dSTejun Heo { 70463d95a91STejun Heo if (unlikely(list_empty(&pool->idle_list))) 7057e11629dSTejun Heo return NULL; 7067e11629dSTejun Heo 70763d95a91STejun Heo return list_first_entry(&pool->idle_list, struct worker, entry); 7087e11629dSTejun Heo } 7097e11629dSTejun Heo 7107e11629dSTejun Heo /** 7117e11629dSTejun Heo * wake_up_worker - wake up an idle worker 71263d95a91STejun Heo * @pool: worker pool to wake worker from 7137e11629dSTejun Heo * 71463d95a91STejun Heo * Wake up the first idle worker of @pool. 7157e11629dSTejun Heo * 7167e11629dSTejun Heo * CONTEXT: 717d565ed63STejun Heo * spin_lock_irq(pool->lock). 7187e11629dSTejun Heo */ 71963d95a91STejun Heo static void wake_up_worker(struct worker_pool *pool) 7207e11629dSTejun Heo { 72163d95a91STejun Heo struct worker *worker = first_worker(pool); 7227e11629dSTejun Heo 7237e11629dSTejun Heo if (likely(worker)) 7247e11629dSTejun Heo wake_up_process(worker->task); 7257e11629dSTejun Heo } 7267e11629dSTejun Heo 7274690c4abSTejun Heo /** 728e22bee78STejun Heo * wq_worker_waking_up - a worker is waking up 729e22bee78STejun Heo * @task: task waking up 730e22bee78STejun Heo * @cpu: CPU @task is waking up to 731e22bee78STejun Heo * 732e22bee78STejun Heo * This function is called during try_to_wake_up() when a worker is 733e22bee78STejun Heo * being awoken. 
734e22bee78STejun Heo * 735e22bee78STejun Heo * CONTEXT: 736e22bee78STejun Heo * spin_lock_irq(rq->lock) 737e22bee78STejun Heo */ 738d84ff051STejun Heo void wq_worker_waking_up(struct task_struct *task, int cpu) 739e22bee78STejun Heo { 740e22bee78STejun Heo struct worker *worker = kthread_data(task); 741e22bee78STejun Heo 74236576000SJoonsoo Kim if (!(worker->flags & WORKER_NOT_RUNNING)) { 743ec22ca5eSTejun Heo WARN_ON_ONCE(worker->pool->cpu != cpu); 744e19e397aSTejun Heo atomic_inc(&worker->pool->nr_running); 745e22bee78STejun Heo } 74636576000SJoonsoo Kim } 747e22bee78STejun Heo 748e22bee78STejun Heo /** 749e22bee78STejun Heo * wq_worker_sleeping - a worker is going to sleep 750e22bee78STejun Heo * @task: task going to sleep 751e22bee78STejun Heo * @cpu: CPU in question, must be the current CPU number 752e22bee78STejun Heo * 753e22bee78STejun Heo * This function is called during schedule() when a busy worker is 754e22bee78STejun Heo * going to sleep. Worker on the same cpu can be woken up by 755e22bee78STejun Heo * returning pointer to its task. 756e22bee78STejun Heo * 757e22bee78STejun Heo * CONTEXT: 758e22bee78STejun Heo * spin_lock_irq(rq->lock) 759e22bee78STejun Heo * 760e22bee78STejun Heo * RETURNS: 761e22bee78STejun Heo * Worker task on @cpu to wake up, %NULL if none. 762e22bee78STejun Heo */ 763d84ff051STejun Heo struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) 764e22bee78STejun Heo { 765e22bee78STejun Heo struct worker *worker = kthread_data(task), *to_wakeup = NULL; 766111c225aSTejun Heo struct worker_pool *pool; 767e22bee78STejun Heo 768111c225aSTejun Heo /* 769111c225aSTejun Heo * Rescuers, which may not have all the fields set up like normal 770111c225aSTejun Heo * workers, also reach here, let's not access anything before 771111c225aSTejun Heo * checking NOT_RUNNING. 772111c225aSTejun Heo */ 7732d64672eSSteven Rostedt if (worker->flags & WORKER_NOT_RUNNING) 774e22bee78STejun Heo return NULL; 775e22bee78STejun Heo 776111c225aSTejun Heo pool = worker->pool; 777111c225aSTejun Heo 778e22bee78STejun Heo /* this can only happen on the local cpu */ 7796183c009STejun Heo if (WARN_ON_ONCE(cpu != raw_smp_processor_id())) 7806183c009STejun Heo return NULL; 781e22bee78STejun Heo 782e22bee78STejun Heo /* 783e22bee78STejun Heo * The counterpart of the following dec_and_test, implied mb, 784e22bee78STejun Heo * worklist not empty test sequence is in insert_work(). 785e22bee78STejun Heo * Please read comment there. 786e22bee78STejun Heo * 787628c78e7STejun Heo * NOT_RUNNING is clear. This means that we're bound to and 788628c78e7STejun Heo * running on the local cpu w/ rq lock held and preemption 789628c78e7STejun Heo * disabled, which in turn means that none else could be 790d565ed63STejun Heo * manipulating idle_list, so dereferencing idle_list without pool 791628c78e7STejun Heo * lock is safe. 792e22bee78STejun Heo */ 793e19e397aSTejun Heo if (atomic_dec_and_test(&pool->nr_running) && 794e19e397aSTejun Heo !list_empty(&pool->worklist)) 79563d95a91STejun Heo to_wakeup = first_worker(pool); 796e22bee78STejun Heo return to_wakeup ? 
to_wakeup->task : NULL; 797e22bee78STejun Heo } 798e22bee78STejun Heo 799e22bee78STejun Heo /** 800e22bee78STejun Heo * worker_set_flags - set worker flags and adjust nr_running accordingly 801cb444766STejun Heo * @worker: self 802d302f017STejun Heo * @flags: flags to set 803d302f017STejun Heo * @wakeup: wakeup an idle worker if necessary 804d302f017STejun Heo * 805e22bee78STejun Heo * Set @flags in @worker->flags and adjust nr_running accordingly. If 806e22bee78STejun Heo * nr_running becomes zero and @wakeup is %true, an idle worker is 807e22bee78STejun Heo * woken up. 808d302f017STejun Heo * 809cb444766STejun Heo * CONTEXT: 810d565ed63STejun Heo * spin_lock_irq(pool->lock) 811d302f017STejun Heo */ 812d302f017STejun Heo static inline void worker_set_flags(struct worker *worker, unsigned int flags, 813d302f017STejun Heo bool wakeup) 814d302f017STejun Heo { 815bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 816e22bee78STejun Heo 817cb444766STejun Heo WARN_ON_ONCE(worker->task != current); 818cb444766STejun Heo 819e22bee78STejun Heo /* 820e22bee78STejun Heo * If transitioning into NOT_RUNNING, adjust nr_running and 821e22bee78STejun Heo * wake up an idle worker as necessary if requested by 822e22bee78STejun Heo * @wakeup. 823e22bee78STejun Heo */ 824e22bee78STejun Heo if ((flags & WORKER_NOT_RUNNING) && 825e22bee78STejun Heo !(worker->flags & WORKER_NOT_RUNNING)) { 826e22bee78STejun Heo if (wakeup) { 827e19e397aSTejun Heo if (atomic_dec_and_test(&pool->nr_running) && 828bd7bdd43STejun Heo !list_empty(&pool->worklist)) 82963d95a91STejun Heo wake_up_worker(pool); 830e22bee78STejun Heo } else 831e19e397aSTejun Heo atomic_dec(&pool->nr_running); 832e22bee78STejun Heo } 833e22bee78STejun Heo 834d302f017STejun Heo worker->flags |= flags; 835d302f017STejun Heo } 836d302f017STejun Heo 837d302f017STejun Heo /** 838e22bee78STejun Heo * worker_clr_flags - clear worker flags and adjust nr_running accordingly 839cb444766STejun Heo * @worker: self 840d302f017STejun Heo * @flags: flags to clear 841d302f017STejun Heo * 842e22bee78STejun Heo * Clear @flags in @worker->flags and adjust nr_running accordingly. 843d302f017STejun Heo * 844cb444766STejun Heo * CONTEXT: 845d565ed63STejun Heo * spin_lock_irq(pool->lock) 846d302f017STejun Heo */ 847d302f017STejun Heo static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 848d302f017STejun Heo { 84963d95a91STejun Heo struct worker_pool *pool = worker->pool; 850e22bee78STejun Heo unsigned int oflags = worker->flags; 851e22bee78STejun Heo 852cb444766STejun Heo WARN_ON_ONCE(worker->task != current); 853cb444766STejun Heo 854d302f017STejun Heo worker->flags &= ~flags; 855e22bee78STejun Heo 85642c025f3STejun Heo /* 85742c025f3STejun Heo * If transitioning out of NOT_RUNNING, increment nr_running. Note 85842c025f3STejun Heo * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 85942c025f3STejun Heo * of multiple flags, not a single flag. 
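 *
 * A concrete case of that nesting: a worker may have, e.g., both WORKER_PREP
 * and WORKER_UNBOUND set.  Clearing WORKER_PREP alone still leaves the
 * worker in NOT_RUNNING state because WORKER_UNBOUND remains set, so
 * nr_running must not be bumped; the double test below catches exactly this.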
86042c025f3STejun Heo */ 861e22bee78STejun Heo if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 862e22bee78STejun Heo if (!(worker->flags & WORKER_NOT_RUNNING)) 863e19e397aSTejun Heo atomic_inc(&pool->nr_running); 864d302f017STejun Heo } 865d302f017STejun Heo 866d302f017STejun Heo /** 8678cca0eeaSTejun Heo * find_worker_executing_work - find worker which is executing a work 868c9e7cf27STejun Heo * @pool: pool of interest 8698cca0eeaSTejun Heo * @work: work to find worker for 8708cca0eeaSTejun Heo * 871c9e7cf27STejun Heo * Find a worker which is executing @work on @pool by searching 872c9e7cf27STejun Heo * @pool->busy_hash which is keyed by the address of @work. For a worker 873a2c1c57bSTejun Heo * to match, its current execution should match the address of @work and 874a2c1c57bSTejun Heo * its work function. This is to avoid unwanted dependency between 875a2c1c57bSTejun Heo * unrelated work executions through a work item being recycled while still 876a2c1c57bSTejun Heo * being executed. 877a2c1c57bSTejun Heo * 878a2c1c57bSTejun Heo * This is a bit tricky. A work item may be freed once its execution 879a2c1c57bSTejun Heo * starts and nothing prevents the freed area from being recycled for 880a2c1c57bSTejun Heo * another work item. If the same work item address ends up being reused 881a2c1c57bSTejun Heo * before the original execution finishes, workqueue will identify the 882a2c1c57bSTejun Heo * recycled work item as currently executing and make it wait until the 883a2c1c57bSTejun Heo * current execution finishes, introducing an unwanted dependency. 884a2c1c57bSTejun Heo * 885a2c1c57bSTejun Heo * This function checks the work item address, work function and workqueue 886a2c1c57bSTejun Heo * to avoid false positives. Note that this isn't complete as one may 887a2c1c57bSTejun Heo * construct a work function which can introduce dependency onto itself 888a2c1c57bSTejun Heo * through a recycled work item. Well, if somebody wants to shoot oneself 889a2c1c57bSTejun Heo * in the foot that badly, there's only so much we can do, and if such 890a2c1c57bSTejun Heo * deadlock actually occurs, it should be easy to locate the culprit work 891a2c1c57bSTejun Heo * function. 8928cca0eeaSTejun Heo * 8938cca0eeaSTejun Heo * CONTEXT: 894d565ed63STejun Heo * spin_lock_irq(pool->lock). 8958cca0eeaSTejun Heo * 8968cca0eeaSTejun Heo * RETURNS: 8978cca0eeaSTejun Heo * Pointer to worker which is executing @work if found, NULL 8988cca0eeaSTejun Heo * otherwise. 8998cca0eeaSTejun Heo */ 900c9e7cf27STejun Heo static struct worker *find_worker_executing_work(struct worker_pool *pool, 9018cca0eeaSTejun Heo struct work_struct *work) 9028cca0eeaSTejun Heo { 90342f8570fSSasha Levin struct worker *worker; 90442f8570fSSasha Levin 905b67bfe0dSSasha Levin hash_for_each_possible(pool->busy_hash, worker, hentry, 906a2c1c57bSTejun Heo (unsigned long)work) 907a2c1c57bSTejun Heo if (worker->current_work == work && 908a2c1c57bSTejun Heo worker->current_func == work->func) 90942f8570fSSasha Levin return worker; 91042f8570fSSasha Levin 91142f8570fSSasha Levin return NULL; 9128cca0eeaSTejun Heo } 9138cca0eeaSTejun Heo 9148cca0eeaSTejun Heo /** 915bf4ede01STejun Heo * move_linked_works - move linked works to a list 916bf4ede01STejun Heo * @work: start of series of works to be scheduled 917bf4ede01STejun Heo * @head: target list to append @work to 918bf4ede01STejun Heo * @nextp: out parameter for nested worklist walking 919bf4ede01STejun Heo * 920bf4ede01STejun Heo * Schedule linked works starting from @work to @head.
Work series to 921bf4ede01STejun Heo * be scheduled starts at @work and includes any consecutive work with 922bf4ede01STejun Heo * WORK_STRUCT_LINKED set in its predecessor. 923bf4ede01STejun Heo * 924bf4ede01STejun Heo * If @nextp is not NULL, it's updated to point to the next work of 925bf4ede01STejun Heo * the last scheduled work. This allows move_linked_works() to be 926bf4ede01STejun Heo * nested inside outer list_for_each_entry_safe(). 927bf4ede01STejun Heo * 928bf4ede01STejun Heo * CONTEXT: 929d565ed63STejun Heo * spin_lock_irq(pool->lock). 930bf4ede01STejun Heo */ 931bf4ede01STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head, 932bf4ede01STejun Heo struct work_struct **nextp) 933bf4ede01STejun Heo { 934bf4ede01STejun Heo struct work_struct *n; 935bf4ede01STejun Heo 936bf4ede01STejun Heo /* 937bf4ede01STejun Heo * Linked worklist will always end before the end of the list, 938bf4ede01STejun Heo * use NULL for list head. 939bf4ede01STejun Heo */ 940bf4ede01STejun Heo list_for_each_entry_safe_from(work, n, NULL, entry) { 941bf4ede01STejun Heo list_move_tail(&work->entry, head); 942bf4ede01STejun Heo if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 943bf4ede01STejun Heo break; 944bf4ede01STejun Heo } 945bf4ede01STejun Heo 946bf4ede01STejun Heo /* 947bf4ede01STejun Heo * If we're already inside safe list traversal and have moved 948bf4ede01STejun Heo * multiple works to the scheduled queue, the next position 949bf4ede01STejun Heo * needs to be updated. 950bf4ede01STejun Heo */ 951bf4ede01STejun Heo if (nextp) 952bf4ede01STejun Heo *nextp = n; 953bf4ede01STejun Heo } 954bf4ede01STejun Heo 9558864b4e5STejun Heo /** 9568864b4e5STejun Heo * get_pwq - get an extra reference on the specified pool_workqueue 9578864b4e5STejun Heo * @pwq: pool_workqueue to get 9588864b4e5STejun Heo * 9598864b4e5STejun Heo * Obtain an extra reference on @pwq. The caller should guarantee that 9608864b4e5STejun Heo * @pwq has positive refcnt and be holding the matching pool->lock. 9618864b4e5STejun Heo */ 9628864b4e5STejun Heo static void get_pwq(struct pool_workqueue *pwq) 9638864b4e5STejun Heo { 9648864b4e5STejun Heo lockdep_assert_held(&pwq->pool->lock); 9658864b4e5STejun Heo WARN_ON_ONCE(pwq->refcnt <= 0); 9668864b4e5STejun Heo pwq->refcnt++; 9678864b4e5STejun Heo } 9688864b4e5STejun Heo 9698864b4e5STejun Heo /** 9708864b4e5STejun Heo * put_pwq - put a pool_workqueue reference 9718864b4e5STejun Heo * @pwq: pool_workqueue to put 9728864b4e5STejun Heo * 9738864b4e5STejun Heo * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 9748864b4e5STejun Heo * destruction. The caller should be holding the matching pool->lock. 9758864b4e5STejun Heo */ 9768864b4e5STejun Heo static void put_pwq(struct pool_workqueue *pwq) 9778864b4e5STejun Heo { 9788864b4e5STejun Heo lockdep_assert_held(&pwq->pool->lock); 9798864b4e5STejun Heo if (likely(--pwq->refcnt)) 9808864b4e5STejun Heo return; 9818864b4e5STejun Heo if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) 9828864b4e5STejun Heo return; 9838864b4e5STejun Heo /* 9848864b4e5STejun Heo * @pwq can't be released under pool->lock, bounce to 9858864b4e5STejun Heo * pwq_unbound_release_workfn(). This never recurses on the same 9868864b4e5STejun Heo * pool->lock as this path is taken only for unbound workqueues and 9878864b4e5STejun Heo * the release work item is scheduled on a per-cpu workqueue. 
To 9888864b4e5STejun Heo * avoid lockdep warning, unbound pool->locks are given lockdep 9898864b4e5STejun Heo * subclass of 1 in get_unbound_pool(). 9908864b4e5STejun Heo */ 9918864b4e5STejun Heo schedule_work(&pwq->unbound_release_work); 9928864b4e5STejun Heo } 9938864b4e5STejun Heo 994112202d9STejun Heo static void pwq_activate_delayed_work(struct work_struct *work) 995bf4ede01STejun Heo { 996112202d9STejun Heo struct pool_workqueue *pwq = get_work_pwq(work); 997bf4ede01STejun Heo 998bf4ede01STejun Heo trace_workqueue_activate_work(work); 999112202d9STejun Heo move_linked_works(work, &pwq->pool->worklist, NULL); 1000bf4ede01STejun Heo __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); 1001112202d9STejun Heo pwq->nr_active++; 1002bf4ede01STejun Heo } 1003bf4ede01STejun Heo 1004112202d9STejun Heo static void pwq_activate_first_delayed(struct pool_workqueue *pwq) 10053aa62497SLai Jiangshan { 1006112202d9STejun Heo struct work_struct *work = list_first_entry(&pwq->delayed_works, 10073aa62497SLai Jiangshan struct work_struct, entry); 10083aa62497SLai Jiangshan 1009112202d9STejun Heo pwq_activate_delayed_work(work); 10103aa62497SLai Jiangshan } 10113aa62497SLai Jiangshan 1012bf4ede01STejun Heo /** 1013112202d9STejun Heo * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1014112202d9STejun Heo * @pwq: pwq of interest 1015bf4ede01STejun Heo * @color: color of work which left the queue 1016bf4ede01STejun Heo * 1017bf4ede01STejun Heo * A work either has completed or is removed from pending queue, 1018112202d9STejun Heo * decrement nr_in_flight of its pwq and handle workqueue flushing. 1019bf4ede01STejun Heo * 1020bf4ede01STejun Heo * CONTEXT: 1021d565ed63STejun Heo * spin_lock_irq(pool->lock). 1022bf4ede01STejun Heo */ 1023112202d9STejun Heo static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) 1024bf4ede01STejun Heo { 10258864b4e5STejun Heo /* uncolored work items don't participate in flushing or nr_active */ 1026bf4ede01STejun Heo if (color == WORK_NO_COLOR) 10278864b4e5STejun Heo goto out_put; 1028bf4ede01STejun Heo 1029112202d9STejun Heo pwq->nr_in_flight[color]--; 1030bf4ede01STejun Heo 1031112202d9STejun Heo pwq->nr_active--; 1032112202d9STejun Heo if (!list_empty(&pwq->delayed_works)) { 1033bf4ede01STejun Heo /* one down, submit a delayed one */ 1034112202d9STejun Heo if (pwq->nr_active < pwq->max_active) 1035112202d9STejun Heo pwq_activate_first_delayed(pwq); 1036bf4ede01STejun Heo } 1037bf4ede01STejun Heo 1038bf4ede01STejun Heo /* is flush in progress and are we at the flushing tip? */ 1039112202d9STejun Heo if (likely(pwq->flush_color != color)) 10408864b4e5STejun Heo goto out_put; 1041bf4ede01STejun Heo 1042bf4ede01STejun Heo /* are there still in-flight works? */ 1043112202d9STejun Heo if (pwq->nr_in_flight[color]) 10448864b4e5STejun Heo goto out_put; 1045bf4ede01STejun Heo 1046112202d9STejun Heo /* this pwq is done, clear flush_color */ 1047112202d9STejun Heo pwq->flush_color = -1; 1048bf4ede01STejun Heo 1049bf4ede01STejun Heo /* 1050112202d9STejun Heo * If this was the last pwq, wake up the first flusher. It 1051bf4ede01STejun Heo * will handle the rest. 
1052bf4ede01STejun Heo */ 1053112202d9STejun Heo if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 1054112202d9STejun Heo complete(&pwq->wq->first_flusher->done); 10558864b4e5STejun Heo out_put: 10568864b4e5STejun Heo put_pwq(pwq); 1057bf4ede01STejun Heo } 1058bf4ede01STejun Heo 105936e227d2STejun Heo /** 1060bbb68dfaSTejun Heo * try_to_grab_pending - steal work item from worklist and disable irq 106136e227d2STejun Heo * @work: work item to steal 106236e227d2STejun Heo * @is_dwork: @work is a delayed_work 1063bbb68dfaSTejun Heo * @flags: place to store irq state 106436e227d2STejun Heo * 106536e227d2STejun Heo * Try to grab PENDING bit of @work. This function can handle @work in any 106636e227d2STejun Heo * stable state - idle, on timer or on worklist. Return values are 106736e227d2STejun Heo * 106836e227d2STejun Heo * 1 if @work was pending and we successfully stole PENDING 106936e227d2STejun Heo * 0 if @work was idle and we claimed PENDING 107036e227d2STejun Heo * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1071bbb68dfaSTejun Heo * -ENOENT if someone else is canceling @work, this state may persist 1072bbb68dfaSTejun Heo * for arbitrarily long 107336e227d2STejun Heo * 1074bbb68dfaSTejun Heo * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1075e0aecdd8STejun Heo * interrupted while holding PENDING and @work off queue, irq must be 1076e0aecdd8STejun Heo * disabled on entry. This, combined with delayed_work->timer being 1077e0aecdd8STejun Heo * irqsafe, ensures that we return -EAGAIN for finite short period of time. 1078bbb68dfaSTejun Heo * 1079bbb68dfaSTejun Heo * On successful return, >= 0, irq is disabled and the caller is 1080bbb68dfaSTejun Heo * responsible for releasing it using local_irq_restore(*@flags). 1081bbb68dfaSTejun Heo * 1082e0aecdd8STejun Heo * This function is safe to call from any context including IRQ handler. 1083bf4ede01STejun Heo */ 1084bbb68dfaSTejun Heo static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1085bbb68dfaSTejun Heo unsigned long *flags) 1086bf4ede01STejun Heo { 1087d565ed63STejun Heo struct worker_pool *pool; 1088112202d9STejun Heo struct pool_workqueue *pwq; 1089bf4ede01STejun Heo 1090bbb68dfaSTejun Heo local_irq_save(*flags); 1091bbb68dfaSTejun Heo 109236e227d2STejun Heo /* try to steal the timer if it exists */ 109336e227d2STejun Heo if (is_dwork) { 109436e227d2STejun Heo struct delayed_work *dwork = to_delayed_work(work); 109536e227d2STejun Heo 1096e0aecdd8STejun Heo /* 1097e0aecdd8STejun Heo * dwork->timer is irqsafe. If del_timer() fails, it's 1098e0aecdd8STejun Heo * guaranteed that the timer is not queued anywhere and not 1099e0aecdd8STejun Heo * running on the local CPU. 1100e0aecdd8STejun Heo */ 110136e227d2STejun Heo if (likely(del_timer(&dwork->timer))) 110236e227d2STejun Heo return 1; 110336e227d2STejun Heo } 110436e227d2STejun Heo 110536e227d2STejun Heo /* try to claim PENDING the normal way */ 1106bf4ede01STejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 1107bf4ede01STejun Heo return 0; 1108bf4ede01STejun Heo 1109bf4ede01STejun Heo /* 1110bf4ede01STejun Heo * The queueing is in progress, or it is already queued. Try to 1111bf4ede01STejun Heo * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 
1112bf4ede01STejun Heo */ 1113d565ed63STejun Heo pool = get_work_pool(work); 1114d565ed63STejun Heo if (!pool) 1115bbb68dfaSTejun Heo goto fail; 1116bf4ede01STejun Heo 1117d565ed63STejun Heo spin_lock(&pool->lock); 1118bf4ede01STejun Heo /* 1119112202d9STejun Heo * work->data is guaranteed to point to pwq only while the work 1120112202d9STejun Heo * item is queued on pwq->wq, and both updating work->data to point 1121112202d9STejun Heo * to pwq on queueing and to pool on dequeueing are done under 1122112202d9STejun Heo * pwq->pool->lock. This in turn guarantees that, if work->data 1123112202d9STejun Heo * points to pwq which is associated with a locked pool, the work 11240b3dae68SLai Jiangshan * item is currently queued on that pool. 1125bf4ede01STejun Heo */ 1126112202d9STejun Heo pwq = get_work_pwq(work); 1127112202d9STejun Heo if (pwq && pwq->pool == pool) { 1128bf4ede01STejun Heo debug_work_deactivate(work); 11293aa62497SLai Jiangshan 11303aa62497SLai Jiangshan /* 113116062836STejun Heo * A delayed work item cannot be grabbed directly because 113216062836STejun Heo * it might have linked NO_COLOR work items which, if left 1133112202d9STejun Heo * on the delayed_list, will confuse pwq->nr_active 113416062836STejun Heo * management later on and cause stall. Make sure the work 113516062836STejun Heo * item is activated before grabbing. 11363aa62497SLai Jiangshan */ 11373aa62497SLai Jiangshan if (*work_data_bits(work) & WORK_STRUCT_DELAYED) 1138112202d9STejun Heo pwq_activate_delayed_work(work); 11393aa62497SLai Jiangshan 1140bf4ede01STejun Heo list_del_init(&work->entry); 1141112202d9STejun Heo pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work)); 114236e227d2STejun Heo 1143112202d9STejun Heo /* work->data points to pwq iff queued, point to pool */ 11444468a00fSLai Jiangshan set_work_pool_and_keep_pending(work, pool->id); 11454468a00fSLai Jiangshan 1146d565ed63STejun Heo spin_unlock(&pool->lock); 114736e227d2STejun Heo return 1; 1148bf4ede01STejun Heo } 1149d565ed63STejun Heo spin_unlock(&pool->lock); 1150bbb68dfaSTejun Heo fail: 1151bbb68dfaSTejun Heo local_irq_restore(*flags); 1152bbb68dfaSTejun Heo if (work_is_canceling(work)) 1153bbb68dfaSTejun Heo return -ENOENT; 1154bbb68dfaSTejun Heo cpu_relax(); 115536e227d2STejun Heo return -EAGAIN; 1156bf4ede01STejun Heo } 1157bf4ede01STejun Heo 1158bf4ede01STejun Heo /** 1159706026c2STejun Heo * insert_work - insert a work into a pool 1160112202d9STejun Heo * @pwq: pwq @work belongs to 11614690c4abSTejun Heo * @work: work to insert 11624690c4abSTejun Heo * @head: insertion point 11634690c4abSTejun Heo * @extra_flags: extra WORK_STRUCT_* flags to set 11644690c4abSTejun Heo * 1165112202d9STejun Heo * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 1166706026c2STejun Heo * work_struct flags. 11674690c4abSTejun Heo * 11684690c4abSTejun Heo * CONTEXT: 1169d565ed63STejun Heo * spin_lock_irq(pool->lock). 
1170365970a1SDavid Howells  */
1171112202d9STejun Heo static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1172112202d9STejun Heo 			struct list_head *head, unsigned int extra_flags)
1173b89deed3SOleg Nesterov {
1174112202d9STejun Heo 	struct worker_pool *pool = pwq->pool;
1175e1d8aa9fSFrederic Weisbecker 
11764690c4abSTejun Heo 	/* we own @work, set data and link */
1177112202d9STejun Heo 	set_work_pwq(work, pwq, extra_flags);
11781a4d9b0aSOleg Nesterov 	list_add_tail(&work->entry, head);
11798864b4e5STejun Heo 	get_pwq(pwq);
1180e22bee78STejun Heo 
1181e22bee78STejun Heo 	/*
1182e22bee78STejun Heo 	 * Ensure either worker_sched_deactivated() sees the above
1183e22bee78STejun Heo 	 * list_add_tail() or we see zero nr_running to avoid workers
1184e22bee78STejun Heo 	 * lying around lazily while there are works to be processed.
1185e22bee78STejun Heo 	 */
1186e22bee78STejun Heo 	smp_mb();
1187e22bee78STejun Heo 
118863d95a91STejun Heo 	if (__need_more_worker(pool))
118963d95a91STejun Heo 		wake_up_worker(pool);
1190b89deed3SOleg Nesterov }
1191b89deed3SOleg Nesterov 
1192c8efcc25STejun Heo /*
1193c8efcc25STejun Heo  * Test whether @work is being queued from another work executing on the
11948d03ecfeSTejun Heo  * same workqueue.
1195c8efcc25STejun Heo  */
1196c8efcc25STejun Heo static bool is_chained_work(struct workqueue_struct *wq)
1197c8efcc25STejun Heo {
1198c8efcc25STejun Heo 	struct worker *worker;
1199c8efcc25STejun Heo 
12008d03ecfeSTejun Heo 	worker = current_wq_worker();
1201c8efcc25STejun Heo 	/*
12028d03ecfeSTejun Heo 	 * Return %true iff I'm a worker executing a work item on @wq. If
12038d03ecfeSTejun Heo 	 * I'm @worker, it's safe to dereference it without locking.
1204c8efcc25STejun Heo 	 */
1205112202d9STejun Heo 	return worker && worker->current_pwq->wq == wq;
1206c8efcc25STejun Heo }
1207c8efcc25STejun Heo 
1208d84ff051STejun Heo static void __queue_work(int cpu, struct workqueue_struct *wq,
12091da177e4SLinus Torvalds 			 struct work_struct *work)
12101da177e4SLinus Torvalds {
1211112202d9STejun Heo 	struct pool_workqueue *pwq;
1212c9178087STejun Heo 	struct worker_pool *last_pool;
12131e19ffc6STejun Heo 	struct list_head *worklist;
12148a2e8e5dSTejun Heo 	unsigned int work_flags;
1215b75cac93SJoonsoo Kim 	unsigned int req_cpu = cpu;
12168930cabaSTejun Heo 
12178930cabaSTejun Heo 	/*
12188930cabaSTejun Heo 	 * While a work item is PENDING && off queue, a task trying to
12198930cabaSTejun Heo 	 * steal the PENDING will busy-loop waiting for it to either get
12208930cabaSTejun Heo 	 * queued or lose PENDING. Grabbing PENDING and queueing should
12218930cabaSTejun Heo 	 * happen with IRQ disabled.
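	 *
	 * A sketch of the caller-side convention (this mirrors what
	 * queue_work_on() below does; "flags" is the caller's local
	 * irq-state variable):
	 *
	 *	local_irq_save(flags);
	 *	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
	 *		__queue_work(cpu, wq, work);
	 *	local_irq_restore(flags);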
12228930cabaSTejun Heo */ 12238930cabaSTejun Heo WARN_ON_ONCE(!irqs_disabled()); 12241da177e4SLinus Torvalds 1225dc186ad7SThomas Gleixner debug_work_activate(work); 12261e19ffc6STejun Heo 1227c8efcc25STejun Heo /* if dying, only works from the same workqueue are allowed */ 12289c5a2ba7STejun Heo if (unlikely(wq->flags & WQ_DRAINING) && 1229c8efcc25STejun Heo WARN_ON_ONCE(!is_chained_work(wq))) 1230e41e704bSTejun Heo return; 1231e41e704bSTejun Heo 1232c9178087STejun Heo /* pwq which will be used unless @work is executing elsewhere */ 1233c7fc77f7STejun Heo if (!(wq->flags & WQ_UNBOUND)) { 123457469821STejun Heo if (cpu == WORK_CPU_UNBOUND) 1235f3421797STejun Heo cpu = raw_smp_processor_id(); 1236c9178087STejun Heo pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 1237c9178087STejun Heo } else { 1238c9178087STejun Heo pwq = first_pwq(wq); 1239c9178087STejun Heo } 1240f3421797STejun Heo 124118aa9effSTejun Heo /* 1242c9178087STejun Heo * If @work was previously on a different pool, it might still be 1243c9178087STejun Heo * running there, in which case the work needs to be queued on that 1244c9178087STejun Heo * pool to guarantee non-reentrancy. 124518aa9effSTejun Heo */ 1246c9e7cf27STejun Heo last_pool = get_work_pool(work); 1247112202d9STejun Heo if (last_pool && last_pool != pwq->pool) { 124818aa9effSTejun Heo struct worker *worker; 124918aa9effSTejun Heo 1250d565ed63STejun Heo spin_lock(&last_pool->lock); 125118aa9effSTejun Heo 1252c9e7cf27STejun Heo worker = find_worker_executing_work(last_pool, work); 125318aa9effSTejun Heo 1254112202d9STejun Heo if (worker && worker->current_pwq->wq == wq) { 1255c9178087STejun Heo pwq = worker->current_pwq; 12568594fadeSLai Jiangshan } else { 125718aa9effSTejun Heo /* meh... not running there, queue here */ 1258d565ed63STejun Heo spin_unlock(&last_pool->lock); 1259112202d9STejun Heo spin_lock(&pwq->pool->lock); 126018aa9effSTejun Heo } 12618930cabaSTejun Heo } else { 1262112202d9STejun Heo spin_lock(&pwq->pool->lock); 12638930cabaSTejun Heo } 1264502ca9d8STejun Heo 1265112202d9STejun Heo /* pwq determined, queue */ 1266112202d9STejun Heo trace_workqueue_queue_work(req_cpu, pwq, work); 1267502ca9d8STejun Heo 1268f5b2552bSDan Carpenter if (WARN_ON(!list_empty(&work->entry))) { 1269112202d9STejun Heo spin_unlock(&pwq->pool->lock); 1270f5b2552bSDan Carpenter return; 1271f5b2552bSDan Carpenter } 12721e19ffc6STejun Heo 1273112202d9STejun Heo pwq->nr_in_flight[pwq->work_color]++; 1274112202d9STejun Heo work_flags = work_color_to_flags(pwq->work_color); 12751e19ffc6STejun Heo 1276112202d9STejun Heo if (likely(pwq->nr_active < pwq->max_active)) { 1277cdadf009STejun Heo trace_workqueue_activate_work(work); 1278112202d9STejun Heo pwq->nr_active++; 1279112202d9STejun Heo worklist = &pwq->pool->worklist; 12808a2e8e5dSTejun Heo } else { 12818a2e8e5dSTejun Heo work_flags |= WORK_STRUCT_DELAYED; 1282112202d9STejun Heo worklist = &pwq->delayed_works; 12838a2e8e5dSTejun Heo } 12841e19ffc6STejun Heo 1285112202d9STejun Heo insert_work(pwq, work, worklist, work_flags); 12861e19ffc6STejun Heo 1287112202d9STejun Heo spin_unlock(&pwq->pool->lock); 12881da177e4SLinus Torvalds } 12891da177e4SLinus Torvalds 12900fcb78c2SRolf Eike Beer /** 1291c1a220e7SZhang Rui * queue_work_on - queue work on specific cpu 1292c1a220e7SZhang Rui * @cpu: CPU number to execute work on 1293c1a220e7SZhang Rui * @wq: workqueue to use 1294c1a220e7SZhang Rui * @work: work to queue 1295c1a220e7SZhang Rui * 1296d4283e93STejun Heo * Returns %false if @work was already on a queue, %true otherwise. 
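 *
 * Purely illustrative usage (my_wq, my_work and my_work_fn are
 * hypothetical, not defined in this file):
 *
 *	INIT_WORK(&my_work, my_work_fn);
 *	if (!queue_work_on(1, my_wq, &my_work))
 *		pr_debug("my_work was already pending\n");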
1297c1a220e7SZhang Rui * 1298c1a220e7SZhang Rui * We queue the work to a specific CPU, the caller must ensure it 1299c1a220e7SZhang Rui * can't go away. 1300c1a220e7SZhang Rui */ 1301d4283e93STejun Heo bool queue_work_on(int cpu, struct workqueue_struct *wq, 1302d4283e93STejun Heo struct work_struct *work) 1303c1a220e7SZhang Rui { 1304d4283e93STejun Heo bool ret = false; 13058930cabaSTejun Heo unsigned long flags; 13068930cabaSTejun Heo 13078930cabaSTejun Heo local_irq_save(flags); 1308c1a220e7SZhang Rui 130922df02bbSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 13104690c4abSTejun Heo __queue_work(cpu, wq, work); 1311d4283e93STejun Heo ret = true; 1312c1a220e7SZhang Rui } 13138930cabaSTejun Heo 13148930cabaSTejun Heo local_irq_restore(flags); 1315c1a220e7SZhang Rui return ret; 1316c1a220e7SZhang Rui } 1317c1a220e7SZhang Rui EXPORT_SYMBOL_GPL(queue_work_on); 1318c1a220e7SZhang Rui 13190a13c00eSTejun Heo /** 13200a13c00eSTejun Heo * queue_work - queue work on a workqueue 13210a13c00eSTejun Heo * @wq: workqueue to use 13220a13c00eSTejun Heo * @work: work to queue 13230a13c00eSTejun Heo * 1324d4283e93STejun Heo * Returns %false if @work was already on a queue, %true otherwise. 13250a13c00eSTejun Heo * 13260a13c00eSTejun Heo * We queue the work to the CPU on which it was submitted, but if the CPU dies 13270a13c00eSTejun Heo * it can be processed by another CPU. 13280a13c00eSTejun Heo */ 1329d4283e93STejun Heo bool queue_work(struct workqueue_struct *wq, struct work_struct *work) 13300a13c00eSTejun Heo { 133157469821STejun Heo return queue_work_on(WORK_CPU_UNBOUND, wq, work); 13320a13c00eSTejun Heo } 13330a13c00eSTejun Heo EXPORT_SYMBOL_GPL(queue_work); 13340a13c00eSTejun Heo 1335d8e794dfSTejun Heo void delayed_work_timer_fn(unsigned long __data) 13361da177e4SLinus Torvalds { 133752bad64dSDavid Howells struct delayed_work *dwork = (struct delayed_work *)__data; 13381da177e4SLinus Torvalds 1339e0aecdd8STejun Heo /* should have been called from irqsafe timer with irq already off */ 134060c057bcSLai Jiangshan __queue_work(dwork->cpu, dwork->wq, &dwork->work); 13411da177e4SLinus Torvalds } 13421438ade5SKonstantin Khlebnikov EXPORT_SYMBOL(delayed_work_timer_fn); 13431da177e4SLinus Torvalds 13447beb2edfSTejun Heo static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 134552bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 13461da177e4SLinus Torvalds { 13477beb2edfSTejun Heo struct timer_list *timer = &dwork->timer; 13487beb2edfSTejun Heo struct work_struct *work = &dwork->work; 13491da177e4SLinus Torvalds 13507beb2edfSTejun Heo WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 13517beb2edfSTejun Heo timer->data != (unsigned long)dwork); 1352fc4b514fSTejun Heo WARN_ON_ONCE(timer_pending(timer)); 1353fc4b514fSTejun Heo WARN_ON_ONCE(!list_empty(&work->entry)); 13547beb2edfSTejun Heo 13558852aac2STejun Heo /* 13568852aac2STejun Heo * If @delay is 0, queue @dwork->work immediately. This is for 13578852aac2STejun Heo * both optimization and correctness. The earliest @timer can 13588852aac2STejun Heo * expire is on the closest next tick and delayed_work users depend 13598852aac2STejun Heo * on that there's no such delay when @delay is 0. 
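 *
 * IOW, as far as the caller is concerned,
 *
 *	queue_delayed_work(wq, dwork, 0);
 *
 * is expected to behave just like an immediate queue_work(wq, &dwork->work).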
13608852aac2STejun Heo */ 13618852aac2STejun Heo if (!delay) { 13628852aac2STejun Heo __queue_work(cpu, wq, &dwork->work); 13638852aac2STejun Heo return; 13648852aac2STejun Heo } 13658852aac2STejun Heo 13667beb2edfSTejun Heo timer_stats_timer_set_start_info(&dwork->timer); 13677beb2edfSTejun Heo 136860c057bcSLai Jiangshan dwork->wq = wq; 13691265057fSTejun Heo dwork->cpu = cpu; 13707beb2edfSTejun Heo timer->expires = jiffies + delay; 13717beb2edfSTejun Heo 13727beb2edfSTejun Heo if (unlikely(cpu != WORK_CPU_UNBOUND)) 13737beb2edfSTejun Heo add_timer_on(timer, cpu); 13747beb2edfSTejun Heo else 13757beb2edfSTejun Heo add_timer(timer); 13767beb2edfSTejun Heo } 13771da177e4SLinus Torvalds 13780fcb78c2SRolf Eike Beer /** 13790fcb78c2SRolf Eike Beer * queue_delayed_work_on - queue work on specific CPU after delay 13800fcb78c2SRolf Eike Beer * @cpu: CPU number to execute work on 13810fcb78c2SRolf Eike Beer * @wq: workqueue to use 1382af9997e4SRandy Dunlap * @dwork: work to queue 13830fcb78c2SRolf Eike Beer * @delay: number of jiffies to wait before queueing 13840fcb78c2SRolf Eike Beer * 1385715f1300STejun Heo * Returns %false if @work was already on a queue, %true otherwise. If 1386715f1300STejun Heo * @delay is zero and @dwork is idle, it will be scheduled for immediate 1387715f1300STejun Heo * execution. 13880fcb78c2SRolf Eike Beer */ 1389d4283e93STejun Heo bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 139052bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 13917a6bc1cdSVenkatesh Pallipadi { 139252bad64dSDavid Howells struct work_struct *work = &dwork->work; 1393d4283e93STejun Heo bool ret = false; 13948930cabaSTejun Heo unsigned long flags; 13958930cabaSTejun Heo 13968930cabaSTejun Heo /* read the comment in __queue_work() */ 13978930cabaSTejun Heo local_irq_save(flags); 13987a6bc1cdSVenkatesh Pallipadi 139922df02bbSTejun Heo if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 14007beb2edfSTejun Heo __queue_delayed_work(cpu, wq, dwork, delay); 1401d4283e93STejun Heo ret = true; 14027a6bc1cdSVenkatesh Pallipadi } 14038930cabaSTejun Heo 14048930cabaSTejun Heo local_irq_restore(flags); 14057a6bc1cdSVenkatesh Pallipadi return ret; 14067a6bc1cdSVenkatesh Pallipadi } 1407ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work_on); 14081da177e4SLinus Torvalds 1409c8e55f36STejun Heo /** 14100a13c00eSTejun Heo * queue_delayed_work - queue work on a workqueue after delay 14110a13c00eSTejun Heo * @wq: workqueue to use 14120a13c00eSTejun Heo * @dwork: delayable work to queue 14130a13c00eSTejun Heo * @delay: number of jiffies to wait before queueing 14140a13c00eSTejun Heo * 1415715f1300STejun Heo * Equivalent to queue_delayed_work_on() but tries to use the local CPU. 
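 *
 * A common pattern (purely illustrative; poll_fn, poll_dwork and
 * do_poll() are hypothetical) is a self-rearming poller:
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		do_poll();
 *		queue_delayed_work(system_wq, &poll_dwork, HZ);
 *	}
 *
 * with poll_dwork set up beforehand via DECLARE_DELAYED_WORK(poll_dwork,
 * poll_fn) or INIT_DELAYED_WORK().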
14160a13c00eSTejun Heo */ 1417d4283e93STejun Heo bool queue_delayed_work(struct workqueue_struct *wq, 14180a13c00eSTejun Heo struct delayed_work *dwork, unsigned long delay) 14190a13c00eSTejun Heo { 142057469821STejun Heo return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 14210a13c00eSTejun Heo } 14220a13c00eSTejun Heo EXPORT_SYMBOL_GPL(queue_delayed_work); 14230a13c00eSTejun Heo 14240a13c00eSTejun Heo /** 14258376fe22STejun Heo * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 14268376fe22STejun Heo * @cpu: CPU number to execute work on 14278376fe22STejun Heo * @wq: workqueue to use 14288376fe22STejun Heo * @dwork: work to queue 14298376fe22STejun Heo * @delay: number of jiffies to wait before queueing 14308376fe22STejun Heo * 14318376fe22STejun Heo * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 14328376fe22STejun Heo * modify @dwork's timer so that it expires after @delay. If @delay is 14338376fe22STejun Heo * zero, @work is guaranteed to be scheduled immediately regardless of its 14348376fe22STejun Heo * current state. 14358376fe22STejun Heo * 14368376fe22STejun Heo * Returns %false if @dwork was idle and queued, %true if @dwork was 14378376fe22STejun Heo * pending and its timer was modified. 14388376fe22STejun Heo * 1439e0aecdd8STejun Heo * This function is safe to call from any context including IRQ handler. 14408376fe22STejun Heo * See try_to_grab_pending() for details. 14418376fe22STejun Heo */ 14428376fe22STejun Heo bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 14438376fe22STejun Heo struct delayed_work *dwork, unsigned long delay) 14448376fe22STejun Heo { 14458376fe22STejun Heo unsigned long flags; 14468376fe22STejun Heo int ret; 14478376fe22STejun Heo 14488376fe22STejun Heo do { 14498376fe22STejun Heo ret = try_to_grab_pending(&dwork->work, true, &flags); 14508376fe22STejun Heo } while (unlikely(ret == -EAGAIN)); 14518376fe22STejun Heo 14528376fe22STejun Heo if (likely(ret >= 0)) { 14538376fe22STejun Heo __queue_delayed_work(cpu, wq, dwork, delay); 14548376fe22STejun Heo local_irq_restore(flags); 14558376fe22STejun Heo } 14568376fe22STejun Heo 14578376fe22STejun Heo /* -ENOENT from try_to_grab_pending() becomes %true */ 14588376fe22STejun Heo return ret; 14598376fe22STejun Heo } 14608376fe22STejun Heo EXPORT_SYMBOL_GPL(mod_delayed_work_on); 14618376fe22STejun Heo 14628376fe22STejun Heo /** 14638376fe22STejun Heo * mod_delayed_work - modify delay of or queue a delayed work 14648376fe22STejun Heo * @wq: workqueue to use 14658376fe22STejun Heo * @dwork: work to queue 14668376fe22STejun Heo * @delay: number of jiffies to wait before queueing 14678376fe22STejun Heo * 14688376fe22STejun Heo * mod_delayed_work_on() on local CPU. 14698376fe22STejun Heo */ 14708376fe22STejun Heo bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, 14718376fe22STejun Heo unsigned long delay) 14728376fe22STejun Heo { 14738376fe22STejun Heo return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 14748376fe22STejun Heo } 14758376fe22STejun Heo EXPORT_SYMBOL_GPL(mod_delayed_work); 14768376fe22STejun Heo 14778376fe22STejun Heo /** 1478c8e55f36STejun Heo * worker_enter_idle - enter idle state 1479c8e55f36STejun Heo * @worker: worker which is entering idle state 1480c8e55f36STejun Heo * 1481c8e55f36STejun Heo * @worker is entering idle state. Update stats and idle timer if 1482c8e55f36STejun Heo * necessary. 
1483c8e55f36STejun Heo * 1484c8e55f36STejun Heo * LOCKING: 1485d565ed63STejun Heo * spin_lock_irq(pool->lock). 1486c8e55f36STejun Heo */ 1487c8e55f36STejun Heo static void worker_enter_idle(struct worker *worker) 14881da177e4SLinus Torvalds { 1489bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 1490c8e55f36STejun Heo 14916183c009STejun Heo if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 14926183c009STejun Heo WARN_ON_ONCE(!list_empty(&worker->entry) && 14936183c009STejun Heo (worker->hentry.next || worker->hentry.pprev))) 14946183c009STejun Heo return; 1495c8e55f36STejun Heo 1496cb444766STejun Heo /* can't use worker_set_flags(), also called from start_worker() */ 1497cb444766STejun Heo worker->flags |= WORKER_IDLE; 1498bd7bdd43STejun Heo pool->nr_idle++; 1499e22bee78STejun Heo worker->last_active = jiffies; 1500c8e55f36STejun Heo 1501c8e55f36STejun Heo /* idle_list is LIFO */ 1502bd7bdd43STejun Heo list_add(&worker->entry, &pool->idle_list); 1503db7bccf4STejun Heo 150463d95a91STejun Heo if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 1505628c78e7STejun Heo mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 1506cb444766STejun Heo 1507544ecf31STejun Heo /* 1508706026c2STejun Heo * Sanity check nr_running. Because wq_unbind_fn() releases 1509d565ed63STejun Heo * pool->lock between setting %WORKER_UNBOUND and zapping 1510628c78e7STejun Heo * nr_running, the warning may trigger spuriously. Check iff 1511628c78e7STejun Heo * unbind is not in progress. 1512544ecf31STejun Heo */ 151324647570STejun Heo WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 1514bd7bdd43STejun Heo pool->nr_workers == pool->nr_idle && 1515e19e397aSTejun Heo atomic_read(&pool->nr_running)); 1516c8e55f36STejun Heo } 1517c8e55f36STejun Heo 1518c8e55f36STejun Heo /** 1519c8e55f36STejun Heo * worker_leave_idle - leave idle state 1520c8e55f36STejun Heo * @worker: worker which is leaving idle state 1521c8e55f36STejun Heo * 1522c8e55f36STejun Heo * @worker is leaving idle state. Update stats. 1523c8e55f36STejun Heo * 1524c8e55f36STejun Heo * LOCKING: 1525d565ed63STejun Heo * spin_lock_irq(pool->lock). 1526c8e55f36STejun Heo */ 1527c8e55f36STejun Heo static void worker_leave_idle(struct worker *worker) 1528c8e55f36STejun Heo { 1529bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 1530c8e55f36STejun Heo 15316183c009STejun Heo if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 15326183c009STejun Heo return; 1533d302f017STejun Heo worker_clr_flags(worker, WORKER_IDLE); 1534bd7bdd43STejun Heo pool->nr_idle--; 1535c8e55f36STejun Heo list_del_init(&worker->entry); 1536c8e55f36STejun Heo } 1537c8e55f36STejun Heo 1538e22bee78STejun Heo /** 1539f36dc67bSLai Jiangshan * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it 1540f36dc67bSLai Jiangshan * @pool: target worker_pool 1541f36dc67bSLai Jiangshan * 1542f36dc67bSLai Jiangshan * Bind %current to the cpu of @pool if it is associated and lock @pool. 1543e22bee78STejun Heo * 1544e22bee78STejun Heo * Works which are scheduled while the cpu is online must at least be 1545e22bee78STejun Heo * scheduled to a worker which is bound to the cpu so that if they are 1546e22bee78STejun Heo * flushed from cpu callbacks while cpu is going down, they are 1547e22bee78STejun Heo * guaranteed to execute on the cpu. 
1548e22bee78STejun Heo  *
1549f5faa077SLai Jiangshan  * This function is to be used by unbound workers and rescuers to bind
1550e22bee78STejun Heo  * themselves to the target cpu and may race with cpu going down or
1551e22bee78STejun Heo  * coming online. kthread_bind() can't be used because it may put the
1552e22bee78STejun Heo  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1553706026c2STejun Heo  * verbatim as it's best effort and blocking and pool may be
1554e22bee78STejun Heo  * [dis]associated in the meantime.
1555e22bee78STejun Heo  *
1556706026c2STejun Heo  * This function tries set_cpus_allowed() and locks pool and verifies the
155724647570STejun Heo  * binding against %POOL_DISASSOCIATED which is set during
1558f2d5a0eeSTejun Heo  * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
1559f2d5a0eeSTejun Heo  * enters idle state or fetches works without dropping lock, it can
1560f2d5a0eeSTejun Heo  * guarantee the scheduling requirement described in the first paragraph.
1561e22bee78STejun Heo  *
1562e22bee78STejun Heo  * CONTEXT:
1563d565ed63STejun Heo  * Might sleep. Called without any lock but returns with pool->lock
1564e22bee78STejun Heo  * held.
1565e22bee78STejun Heo  *
1566e22bee78STejun Heo  * RETURNS:
1567706026c2STejun Heo  * %true if the associated pool is online (@worker is successfully
1568e22bee78STejun Heo  * bound), %false if offline.
1569e22bee78STejun Heo  */
1570f36dc67bSLai Jiangshan static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
1571d565ed63STejun Heo __acquires(&pool->lock)
1572e22bee78STejun Heo {
1573e22bee78STejun Heo 	while (true) {
1574e22bee78STejun Heo 		/*
1575e22bee78STejun Heo 		 * The following call may fail, succeed or succeed
1576e22bee78STejun Heo 		 * without actually migrating the task to the cpu if
1577e22bee78STejun Heo 		 * it races with cpu hotunplug operation. Verify
157824647570STejun Heo 		 * against POOL_DISASSOCIATED.
1579e22bee78STejun Heo 		 */
158024647570STejun Heo 		if (!(pool->flags & POOL_DISASSOCIATED))
15817a4e344cSTejun Heo 			set_cpus_allowed_ptr(current, pool->attrs->cpumask);
1582e22bee78STejun Heo 
1583d565ed63STejun Heo 		spin_lock_irq(&pool->lock);
158424647570STejun Heo 		if (pool->flags & POOL_DISASSOCIATED)
1585e22bee78STejun Heo 			return false;
1586f5faa077SLai Jiangshan 		if (task_cpu(current) == pool->cpu &&
15877a4e344cSTejun Heo 		    cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
1588e22bee78STejun Heo 			return true;
1589d565ed63STejun Heo 		spin_unlock_irq(&pool->lock);
1590e22bee78STejun Heo 
15915035b20fSTejun Heo 		/*
15925035b20fSTejun Heo 		 * We've raced with CPU hot[un]plug. Give it a breather
15935035b20fSTejun Heo 		 * and retry migration. cond_resched() is required here;
15945035b20fSTejun Heo 		 * otherwise, we might deadlock against cpu_stop trying to
15955035b20fSTejun Heo 		 * bring down the CPU on non-preemptive kernel.
15965035b20fSTejun Heo 		 */
1597e22bee78STejun Heo 		cpu_relax();
15985035b20fSTejun Heo 		cond_resched();
1599e22bee78STejun Heo 	}
1600e22bee78STejun Heo }
1601e22bee78STejun Heo 
1602e22bee78STejun Heo /*
1603ea1abd61SLai Jiangshan  * Rebind an idle @worker to its CPU. worker_thread() will test
16045f7dabfdSLai Jiangshan  * list_empty(@worker->entry) before leaving idle and call this function.
160525511a47STejun Heo */ 160625511a47STejun Heo static void idle_worker_rebind(struct worker *worker) 160725511a47STejun Heo { 16085f7dabfdSLai Jiangshan /* CPU may go down again inbetween, clear UNBOUND only on success */ 1609f36dc67bSLai Jiangshan if (worker_maybe_bind_and_lock(worker->pool)) 16105f7dabfdSLai Jiangshan worker_clr_flags(worker, WORKER_UNBOUND); 161125511a47STejun Heo 1612ea1abd61SLai Jiangshan /* rebind complete, become available again */ 1613ea1abd61SLai Jiangshan list_add(&worker->entry, &worker->pool->idle_list); 1614d565ed63STejun Heo spin_unlock_irq(&worker->pool->lock); 161525511a47STejun Heo } 161625511a47STejun Heo 161725511a47STejun Heo /* 161825511a47STejun Heo * Function for @worker->rebind.work used to rebind unbound busy workers to 1619403c821dSTejun Heo * the associated cpu which is coming back online. This is scheduled by 1620403c821dSTejun Heo * cpu up but can race with other cpu hotplug operations and may be 1621403c821dSTejun Heo * executed twice without intervening cpu down. 1622e22bee78STejun Heo */ 162325511a47STejun Heo static void busy_worker_rebind_fn(struct work_struct *work) 1624e22bee78STejun Heo { 1625e22bee78STejun Heo struct worker *worker = container_of(work, struct worker, rebind_work); 1626e22bee78STejun Heo 1627f36dc67bSLai Jiangshan if (worker_maybe_bind_and_lock(worker->pool)) 1628eab6d828SLai Jiangshan worker_clr_flags(worker, WORKER_UNBOUND); 1629e22bee78STejun Heo 1630d565ed63STejun Heo spin_unlock_irq(&worker->pool->lock); 1631e22bee78STejun Heo } 1632e22bee78STejun Heo 163325511a47STejun Heo /** 163494cf58bbSTejun Heo * rebind_workers - rebind all workers of a pool to the associated CPU 163594cf58bbSTejun Heo * @pool: pool of interest 163625511a47STejun Heo * 163794cf58bbSTejun Heo * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding 163825511a47STejun Heo * is different for idle and busy ones. 163925511a47STejun Heo * 1640ea1abd61SLai Jiangshan * Idle ones will be removed from the idle_list and woken up. They will 1641ea1abd61SLai Jiangshan * add themselves back after completing rebind. This ensures that the 1642ea1abd61SLai Jiangshan * idle_list doesn't contain any unbound workers when re-bound busy workers 1643ea1abd61SLai Jiangshan * try to perform local wake-ups for concurrency management. 164425511a47STejun Heo * 1645ea1abd61SLai Jiangshan * Busy workers can rebind after they finish their current work items. 1646ea1abd61SLai Jiangshan * Queueing the rebind work item at the head of the scheduled list is 1647ea1abd61SLai Jiangshan * enough. Note that nr_running will be properly bumped as busy workers 1648ea1abd61SLai Jiangshan * rebind. 164925511a47STejun Heo * 1650ea1abd61SLai Jiangshan * On return, all non-manager workers are scheduled for rebind - see 1651ea1abd61SLai Jiangshan * manage_workers() for the manager special case. Any idle worker 1652ea1abd61SLai Jiangshan * including the manager will not appear on @idle_list until rebind is 1653ea1abd61SLai Jiangshan * complete, making local wake-ups safe. 
165425511a47STejun Heo */ 165594cf58bbSTejun Heo static void rebind_workers(struct worker_pool *pool) 165625511a47STejun Heo { 1657ea1abd61SLai Jiangshan struct worker *worker, *n; 165825511a47STejun Heo int i; 165925511a47STejun Heo 1660b2eb83d1SLai Jiangshan lockdep_assert_held(&pool->assoc_mutex); 1661d565ed63STejun Heo lockdep_assert_held(&pool->lock); 166225511a47STejun Heo 16635f7dabfdSLai Jiangshan /* dequeue and kick idle ones */ 1664ea1abd61SLai Jiangshan list_for_each_entry_safe(worker, n, &pool->idle_list, entry) { 1665ea1abd61SLai Jiangshan /* 166694cf58bbSTejun Heo * idle workers should be off @pool->idle_list until rebind 166794cf58bbSTejun Heo * is complete to avoid receiving premature local wake-ups. 1668ea1abd61SLai Jiangshan */ 1669ea1abd61SLai Jiangshan list_del_init(&worker->entry); 167025511a47STejun Heo 167125511a47STejun Heo /* 167294cf58bbSTejun Heo * worker_thread() will see the above dequeuing and call 167394cf58bbSTejun Heo * idle_worker_rebind(). 167425511a47STejun Heo */ 167525511a47STejun Heo wake_up_process(worker->task); 167625511a47STejun Heo } 167725511a47STejun Heo 1678ea1abd61SLai Jiangshan /* rebind busy workers */ 1679b67bfe0dSSasha Levin for_each_busy_worker(worker, i, pool) { 168025511a47STejun Heo struct work_struct *rebind_work = &worker->rebind_work; 1681e2b6a6d5SJoonsoo Kim struct workqueue_struct *wq; 168225511a47STejun Heo 168325511a47STejun Heo if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, 168425511a47STejun Heo work_data_bits(rebind_work))) 168525511a47STejun Heo continue; 168625511a47STejun Heo 168725511a47STejun Heo debug_work_activate(rebind_work); 168890beca5dSTejun Heo 168990beca5dSTejun Heo /* 169094cf58bbSTejun Heo * wq doesn't really matter but let's keep @worker->pool 1691112202d9STejun Heo * and @pwq->pool consistent for sanity. 169290beca5dSTejun Heo */ 16937a4e344cSTejun Heo if (worker->pool->attrs->nice < 0) 1694e2b6a6d5SJoonsoo Kim wq = system_highpri_wq; 1695e2b6a6d5SJoonsoo Kim else 1696e2b6a6d5SJoonsoo Kim wq = system_wq; 1697ec58815aSTejun Heo 16987fb98ea7STejun Heo insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work, 169925511a47STejun Heo worker->scheduled.next, 170025511a47STejun Heo work_color_to_flags(WORK_NO_COLOR)); 1701ec58815aSTejun Heo } 170225511a47STejun Heo } 170325511a47STejun Heo 1704c34056a3STejun Heo static struct worker *alloc_worker(void) 1705c34056a3STejun Heo { 1706c34056a3STejun Heo struct worker *worker; 1707c34056a3STejun Heo 1708c34056a3STejun Heo worker = kzalloc(sizeof(*worker), GFP_KERNEL); 1709c8e55f36STejun Heo if (worker) { 1710c8e55f36STejun Heo INIT_LIST_HEAD(&worker->entry); 1711affee4b2STejun Heo INIT_LIST_HEAD(&worker->scheduled); 171225511a47STejun Heo INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn); 1713e22bee78STejun Heo /* on creation a worker is in !idle && prep state */ 1714e22bee78STejun Heo worker->flags = WORKER_PREP; 1715c8e55f36STejun Heo } 1716c34056a3STejun Heo return worker; 1717c34056a3STejun Heo } 1718c34056a3STejun Heo 1719c34056a3STejun Heo /** 1720c34056a3STejun Heo * create_worker - create a new workqueue worker 172163d95a91STejun Heo * @pool: pool the new worker will belong to 1722c34056a3STejun Heo * 172363d95a91STejun Heo * Create a new worker which is bound to @pool. The returned worker 1724c34056a3STejun Heo * can be started by calling start_worker() or destroyed using 1725c34056a3STejun Heo * destroy_worker(). 1726c34056a3STejun Heo * 1727c34056a3STejun Heo * CONTEXT: 1728c34056a3STejun Heo * Might sleep. Does GFP_KERNEL allocations. 
1729c34056a3STejun Heo * 1730c34056a3STejun Heo * RETURNS: 1731c34056a3STejun Heo * Pointer to the newly created worker. 1732c34056a3STejun Heo */ 1733bc2ae0f5STejun Heo static struct worker *create_worker(struct worker_pool *pool) 1734c34056a3STejun Heo { 17357a4e344cSTejun Heo const char *pri = pool->attrs->nice < 0 ? "H" : ""; 1736c34056a3STejun Heo struct worker *worker = NULL; 1737f3421797STejun Heo int id = -1; 1738c34056a3STejun Heo 1739d565ed63STejun Heo spin_lock_irq(&pool->lock); 1740bd7bdd43STejun Heo while (ida_get_new(&pool->worker_ida, &id)) { 1741d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1742bd7bdd43STejun Heo if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL)) 1743c34056a3STejun Heo goto fail; 1744d565ed63STejun Heo spin_lock_irq(&pool->lock); 1745c34056a3STejun Heo } 1746d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1747c34056a3STejun Heo 1748c34056a3STejun Heo worker = alloc_worker(); 1749c34056a3STejun Heo if (!worker) 1750c34056a3STejun Heo goto fail; 1751c34056a3STejun Heo 1752bd7bdd43STejun Heo worker->pool = pool; 1753c34056a3STejun Heo worker->id = id; 1754c34056a3STejun Heo 175529c91e99STejun Heo if (pool->cpu >= 0) 175694dcf29aSEric Dumazet worker->task = kthread_create_on_node(worker_thread, 1757ec22ca5eSTejun Heo worker, cpu_to_node(pool->cpu), 1758d84ff051STejun Heo "kworker/%d:%d%s", pool->cpu, id, pri); 1759f3421797STejun Heo else 1760f3421797STejun Heo worker->task = kthread_create(worker_thread, worker, 1761ac6104cdSTejun Heo "kworker/u%d:%d%s", 1762ac6104cdSTejun Heo pool->id, id, pri); 1763c34056a3STejun Heo if (IS_ERR(worker->task)) 1764c34056a3STejun Heo goto fail; 1765c34056a3STejun Heo 17667a4e344cSTejun Heo set_user_nice(worker->task, pool->attrs->nice); 17677a4e344cSTejun Heo set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); 17683270476aSTejun Heo 1769db7bccf4STejun Heo /* 17707a4e344cSTejun Heo * %PF_THREAD_BOUND is used to prevent userland from meddling with 17717a4e344cSTejun Heo * cpumask of workqueue workers. This is an abuse. We need 17727a4e344cSTejun Heo * %PF_NO_SETAFFINITY. 1773db7bccf4STejun Heo */ 1774db7bccf4STejun Heo worker->task->flags |= PF_THREAD_BOUND; 17757a4e344cSTejun Heo 17767a4e344cSTejun Heo /* 17777a4e344cSTejun Heo * The caller is responsible for ensuring %POOL_DISASSOCIATED 17787a4e344cSTejun Heo * remains stable across this function. See the comments above the 17797a4e344cSTejun Heo * flag definition for details. 17807a4e344cSTejun Heo */ 17817a4e344cSTejun Heo if (pool->flags & POOL_DISASSOCIATED) 1782f3421797STejun Heo worker->flags |= WORKER_UNBOUND; 1783c34056a3STejun Heo 1784c34056a3STejun Heo return worker; 1785c34056a3STejun Heo fail: 1786c34056a3STejun Heo if (id >= 0) { 1787d565ed63STejun Heo spin_lock_irq(&pool->lock); 1788bd7bdd43STejun Heo ida_remove(&pool->worker_ida, id); 1789d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1790c34056a3STejun Heo } 1791c34056a3STejun Heo kfree(worker); 1792c34056a3STejun Heo return NULL; 1793c34056a3STejun Heo } 1794c34056a3STejun Heo 1795c34056a3STejun Heo /** 1796c34056a3STejun Heo * start_worker - start a newly created worker 1797c34056a3STejun Heo * @worker: worker to start 1798c34056a3STejun Heo * 1799706026c2STejun Heo * Make the pool aware of @worker and start it. 1800c34056a3STejun Heo * 1801c34056a3STejun Heo * CONTEXT: 1802d565ed63STejun Heo * spin_lock_irq(pool->lock). 
1803c34056a3STejun Heo */ 1804c34056a3STejun Heo static void start_worker(struct worker *worker) 1805c34056a3STejun Heo { 1806cb444766STejun Heo worker->flags |= WORKER_STARTED; 1807bd7bdd43STejun Heo worker->pool->nr_workers++; 1808c8e55f36STejun Heo worker_enter_idle(worker); 1809c34056a3STejun Heo wake_up_process(worker->task); 1810c34056a3STejun Heo } 1811c34056a3STejun Heo 1812c34056a3STejun Heo /** 1813c34056a3STejun Heo * destroy_worker - destroy a workqueue worker 1814c34056a3STejun Heo * @worker: worker to be destroyed 1815c34056a3STejun Heo * 1816706026c2STejun Heo * Destroy @worker and adjust @pool stats accordingly. 1817c8e55f36STejun Heo * 1818c8e55f36STejun Heo * CONTEXT: 1819d565ed63STejun Heo * spin_lock_irq(pool->lock) which is released and regrabbed. 1820c34056a3STejun Heo */ 1821c34056a3STejun Heo static void destroy_worker(struct worker *worker) 1822c34056a3STejun Heo { 1823bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 1824c34056a3STejun Heo int id = worker->id; 1825c34056a3STejun Heo 1826c34056a3STejun Heo /* sanity check frenzy */ 18276183c009STejun Heo if (WARN_ON(worker->current_work) || 18286183c009STejun Heo WARN_ON(!list_empty(&worker->scheduled))) 18296183c009STejun Heo return; 1830c34056a3STejun Heo 1831c8e55f36STejun Heo if (worker->flags & WORKER_STARTED) 1832bd7bdd43STejun Heo pool->nr_workers--; 1833c8e55f36STejun Heo if (worker->flags & WORKER_IDLE) 1834bd7bdd43STejun Heo pool->nr_idle--; 1835c8e55f36STejun Heo 1836c8e55f36STejun Heo list_del_init(&worker->entry); 1837cb444766STejun Heo worker->flags |= WORKER_DIE; 1838c8e55f36STejun Heo 1839d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1840c8e55f36STejun Heo 1841c34056a3STejun Heo kthread_stop(worker->task); 1842c34056a3STejun Heo kfree(worker); 1843c34056a3STejun Heo 1844d565ed63STejun Heo spin_lock_irq(&pool->lock); 1845bd7bdd43STejun Heo ida_remove(&pool->worker_ida, id); 1846c34056a3STejun Heo } 1847c34056a3STejun Heo 184863d95a91STejun Heo static void idle_worker_timeout(unsigned long __pool) 1849e22bee78STejun Heo { 185063d95a91STejun Heo struct worker_pool *pool = (void *)__pool; 1851e22bee78STejun Heo 1852d565ed63STejun Heo spin_lock_irq(&pool->lock); 1853e22bee78STejun Heo 185463d95a91STejun Heo if (too_many_workers(pool)) { 1855e22bee78STejun Heo struct worker *worker; 1856e22bee78STejun Heo unsigned long expires; 1857e22bee78STejun Heo 1858e22bee78STejun Heo /* idle_list is kept in LIFO order, check the last one */ 185963d95a91STejun Heo worker = list_entry(pool->idle_list.prev, struct worker, entry); 1860e22bee78STejun Heo expires = worker->last_active + IDLE_WORKER_TIMEOUT; 1861e22bee78STejun Heo 1862e22bee78STejun Heo if (time_before(jiffies, expires)) 186363d95a91STejun Heo mod_timer(&pool->idle_timer, expires); 1864e22bee78STejun Heo else { 1865e22bee78STejun Heo /* it's been idle for too long, wake up manager */ 186611ebea50STejun Heo pool->flags |= POOL_MANAGE_WORKERS; 186763d95a91STejun Heo wake_up_worker(pool); 1868e22bee78STejun Heo } 1869e22bee78STejun Heo } 1870e22bee78STejun Heo 1871d565ed63STejun Heo spin_unlock_irq(&pool->lock); 1872e22bee78STejun Heo } 1873e22bee78STejun Heo 1874493a1724STejun Heo static void send_mayday(struct work_struct *work) 1875e22bee78STejun Heo { 1876112202d9STejun Heo struct pool_workqueue *pwq = get_work_pwq(work); 1877112202d9STejun Heo struct workqueue_struct *wq = pwq->wq; 1878493a1724STejun Heo 1879493a1724STejun Heo lockdep_assert_held(&workqueue_lock); 1880e22bee78STejun Heo 1881493008a8STejun Heo if (!wq->rescuer) 
1882493a1724STejun Heo return; 1883e22bee78STejun Heo 1884e22bee78STejun Heo /* mayday mayday mayday */ 1885493a1724STejun Heo if (list_empty(&pwq->mayday_node)) { 1886493a1724STejun Heo list_add_tail(&pwq->mayday_node, &wq->maydays); 1887e22bee78STejun Heo wake_up_process(wq->rescuer->task); 1888493a1724STejun Heo } 1889e22bee78STejun Heo } 1890e22bee78STejun Heo 1891706026c2STejun Heo static void pool_mayday_timeout(unsigned long __pool) 1892e22bee78STejun Heo { 189363d95a91STejun Heo struct worker_pool *pool = (void *)__pool; 1894e22bee78STejun Heo struct work_struct *work; 1895e22bee78STejun Heo 1896493a1724STejun Heo spin_lock_irq(&workqueue_lock); /* for wq->maydays */ 1897493a1724STejun Heo spin_lock(&pool->lock); 1898e22bee78STejun Heo 189963d95a91STejun Heo if (need_to_create_worker(pool)) { 1900e22bee78STejun Heo /* 1901e22bee78STejun Heo * We've been trying to create a new worker but 1902e22bee78STejun Heo * haven't been successful. We might be hitting an 1903e22bee78STejun Heo * allocation deadlock. Send distress signals to 1904e22bee78STejun Heo * rescuers. 1905e22bee78STejun Heo */ 190663d95a91STejun Heo list_for_each_entry(work, &pool->worklist, entry) 1907e22bee78STejun Heo send_mayday(work); 1908e22bee78STejun Heo } 1909e22bee78STejun Heo 1910493a1724STejun Heo spin_unlock(&pool->lock); 1911493a1724STejun Heo spin_unlock_irq(&workqueue_lock); 1912e22bee78STejun Heo 191363d95a91STejun Heo mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 1914e22bee78STejun Heo } 1915e22bee78STejun Heo 1916e22bee78STejun Heo /** 1917e22bee78STejun Heo * maybe_create_worker - create a new worker if necessary 191863d95a91STejun Heo * @pool: pool to create a new worker for 1919e22bee78STejun Heo * 192063d95a91STejun Heo * Create a new worker for @pool if necessary. @pool is guaranteed to 1921e22bee78STejun Heo * have at least one idle worker on return from this function. If 1922e22bee78STejun Heo * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 192363d95a91STejun Heo * sent to all rescuers with works scheduled on @pool to resolve 1924e22bee78STejun Heo * possible allocation deadlock. 1925e22bee78STejun Heo * 1926e22bee78STejun Heo * On return, need_to_create_worker() is guaranteed to be false and 1927e22bee78STejun Heo * may_start_working() true. 1928e22bee78STejun Heo * 1929e22bee78STejun Heo * LOCKING: 1930d565ed63STejun Heo * spin_lock_irq(pool->lock) which may be released and regrabbed 1931e22bee78STejun Heo * multiple times. Does GFP_KERNEL allocations. Called only from 1932e22bee78STejun Heo * manager. 1933e22bee78STejun Heo * 1934e22bee78STejun Heo * RETURNS: 1935d565ed63STejun Heo * false if no action was taken and pool->lock stayed locked, true 1936e22bee78STejun Heo * otherwise. 
1937e22bee78STejun Heo */ 193863d95a91STejun Heo static bool maybe_create_worker(struct worker_pool *pool) 1939d565ed63STejun Heo __releases(&pool->lock) 1940d565ed63STejun Heo __acquires(&pool->lock) 1941e22bee78STejun Heo { 194263d95a91STejun Heo if (!need_to_create_worker(pool)) 1943e22bee78STejun Heo return false; 1944e22bee78STejun Heo restart: 1945d565ed63STejun Heo spin_unlock_irq(&pool->lock); 19469f9c2364STejun Heo 1947e22bee78STejun Heo /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 194863d95a91STejun Heo mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 1949e22bee78STejun Heo 1950e22bee78STejun Heo while (true) { 1951e22bee78STejun Heo struct worker *worker; 1952e22bee78STejun Heo 1953bc2ae0f5STejun Heo worker = create_worker(pool); 1954e22bee78STejun Heo if (worker) { 195563d95a91STejun Heo del_timer_sync(&pool->mayday_timer); 1956d565ed63STejun Heo spin_lock_irq(&pool->lock); 1957e22bee78STejun Heo start_worker(worker); 19586183c009STejun Heo if (WARN_ON_ONCE(need_to_create_worker(pool))) 19596183c009STejun Heo goto restart; 1960e22bee78STejun Heo return true; 1961e22bee78STejun Heo } 1962e22bee78STejun Heo 196363d95a91STejun Heo if (!need_to_create_worker(pool)) 1964e22bee78STejun Heo break; 1965e22bee78STejun Heo 1966e22bee78STejun Heo __set_current_state(TASK_INTERRUPTIBLE); 1967e22bee78STejun Heo schedule_timeout(CREATE_COOLDOWN); 19689f9c2364STejun Heo 196963d95a91STejun Heo if (!need_to_create_worker(pool)) 1970e22bee78STejun Heo break; 1971e22bee78STejun Heo } 1972e22bee78STejun Heo 197363d95a91STejun Heo del_timer_sync(&pool->mayday_timer); 1974d565ed63STejun Heo spin_lock_irq(&pool->lock); 197563d95a91STejun Heo if (need_to_create_worker(pool)) 1976e22bee78STejun Heo goto restart; 1977e22bee78STejun Heo return true; 1978e22bee78STejun Heo } 1979e22bee78STejun Heo 1980e22bee78STejun Heo /** 1981e22bee78STejun Heo * maybe_destroy_worker - destroy workers which have been idle for a while 198263d95a91STejun Heo * @pool: pool to destroy workers for 1983e22bee78STejun Heo * 198463d95a91STejun Heo * Destroy @pool workers which have been idle for longer than 1985e22bee78STejun Heo * IDLE_WORKER_TIMEOUT. 1986e22bee78STejun Heo * 1987e22bee78STejun Heo * LOCKING: 1988d565ed63STejun Heo * spin_lock_irq(pool->lock) which may be released and regrabbed 1989e22bee78STejun Heo * multiple times. Called only from manager. 1990e22bee78STejun Heo * 1991e22bee78STejun Heo * RETURNS: 1992d565ed63STejun Heo * false if no action was taken and pool->lock stayed locked, true 1993e22bee78STejun Heo * otherwise. 
1994e22bee78STejun Heo  */
199563d95a91STejun Heo static bool maybe_destroy_workers(struct worker_pool *pool)
1996e22bee78STejun Heo {
1997e22bee78STejun Heo 	bool ret = false;
1998e22bee78STejun Heo 
199963d95a91STejun Heo 	while (too_many_workers(pool)) {
2000e22bee78STejun Heo 		struct worker *worker;
2001e22bee78STejun Heo 		unsigned long expires;
2002e22bee78STejun Heo 
200363d95a91STejun Heo 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
2004e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2005e22bee78STejun Heo 
2006e22bee78STejun Heo 		if (time_before(jiffies, expires)) {
200763d95a91STejun Heo 			mod_timer(&pool->idle_timer, expires);
2008e22bee78STejun Heo 			break;
2009e22bee78STejun Heo 		}
2010e22bee78STejun Heo 
2011e22bee78STejun Heo 		destroy_worker(worker);
2012e22bee78STejun Heo 		ret = true;
2013e22bee78STejun Heo 	}
2014e22bee78STejun Heo 
2015e22bee78STejun Heo 	return ret;
2016e22bee78STejun Heo }
2017e22bee78STejun Heo 
2018e22bee78STejun Heo /**
2019e22bee78STejun Heo  * manage_workers - manage worker pool
2020e22bee78STejun Heo  * @worker: self
2021e22bee78STejun Heo  *
2022706026c2STejun Heo  * Assume the manager role and manage the worker pool @worker belongs
2023e22bee78STejun Heo  * to. At any given time, there can be only zero or one manager per
2024706026c2STejun Heo  * pool. The exclusion is handled automatically by this function.
2025e22bee78STejun Heo  *
2026e22bee78STejun Heo  * The caller can safely start processing works on false return. On
2027e22bee78STejun Heo  * true return, it's guaranteed that need_to_create_worker() is false
2028e22bee78STejun Heo  * and may_start_working() is true.
2029e22bee78STejun Heo  *
2030e22bee78STejun Heo  * CONTEXT:
2031d565ed63STejun Heo  * spin_lock_irq(pool->lock) which may be released and regrabbed
2032e22bee78STejun Heo  * multiple times. Does GFP_KERNEL allocations.
2033e22bee78STejun Heo  *
2034e22bee78STejun Heo  * RETURNS:
2035d565ed63STejun Heo  * %false if no action was taken and pool->lock stayed locked, %true
2036d565ed63STejun Heo  * otherwise.
2037e22bee78STejun Heo  */
2038e22bee78STejun Heo static bool manage_workers(struct worker *worker)
2039e22bee78STejun Heo {
204063d95a91STejun Heo 	struct worker_pool *pool = worker->pool;
2041e22bee78STejun Heo 	bool ret = false;
2042e22bee78STejun Heo 
204334a06bd6STejun Heo 	if (!mutex_trylock(&pool->manager_arb))
2044e22bee78STejun Heo 		return ret;
2045e22bee78STejun Heo 
2046ee378aa4SLai Jiangshan 	/*
2047ee378aa4SLai Jiangshan 	 * To simplify both worker management and CPU hotplug, hold off
2048ee378aa4SLai Jiangshan 	 * management while hotplug is in progress. CPU hotplug path can't
204934a06bd6STejun Heo 	 * grab @pool->manager_arb to achieve this because that can lead to
205034a06bd6STejun Heo 	 * idle worker depletion (all become busy thinking someone else is
205134a06bd6STejun Heo 	 * managing) which in turn can result in deadlock under extreme
205234a06bd6STejun Heo 	 * circumstances. Use @pool->assoc_mutex to synchronize manager
205334a06bd6STejun Heo 	 * against CPU hotplug.
2054ee378aa4SLai Jiangshan 	 *
2055b2eb83d1SLai Jiangshan 	 * assoc_mutex would always be free unless CPU hotplug is in
2056d565ed63STejun Heo 	 * progress. trylock first without dropping @pool->lock.
2057ee378aa4SLai Jiangshan */ 2058b2eb83d1SLai Jiangshan if (unlikely(!mutex_trylock(&pool->assoc_mutex))) { 2059d565ed63STejun Heo spin_unlock_irq(&pool->lock); 2060b2eb83d1SLai Jiangshan mutex_lock(&pool->assoc_mutex); 2061ee378aa4SLai Jiangshan /* 2062ee378aa4SLai Jiangshan * CPU hotplug could have happened while we were waiting 2063b2eb83d1SLai Jiangshan * for assoc_mutex. Hotplug itself can't handle us 2064ee378aa4SLai Jiangshan * because manager isn't either on idle or busy list, and 2065706026c2STejun Heo * @pool's state and ours could have deviated. 2066ee378aa4SLai Jiangshan * 2067b2eb83d1SLai Jiangshan * As hotplug is now excluded via assoc_mutex, we can 2068ee378aa4SLai Jiangshan * simply try to bind. It will succeed or fail depending 2069706026c2STejun Heo * on @pool's current state. Try it and adjust 2070ee378aa4SLai Jiangshan * %WORKER_UNBOUND accordingly. 2071ee378aa4SLai Jiangshan */ 2072f36dc67bSLai Jiangshan if (worker_maybe_bind_and_lock(pool)) 2073ee378aa4SLai Jiangshan worker->flags &= ~WORKER_UNBOUND; 2074ee378aa4SLai Jiangshan else 2075ee378aa4SLai Jiangshan worker->flags |= WORKER_UNBOUND; 2076ee378aa4SLai Jiangshan 2077ee378aa4SLai Jiangshan ret = true; 2078ee378aa4SLai Jiangshan } 2079ee378aa4SLai Jiangshan 208011ebea50STejun Heo pool->flags &= ~POOL_MANAGE_WORKERS; 2081e22bee78STejun Heo 2082e22bee78STejun Heo /* 2083e22bee78STejun Heo * Destroy and then create so that may_start_working() is true 2084e22bee78STejun Heo * on return. 2085e22bee78STejun Heo */ 208663d95a91STejun Heo ret |= maybe_destroy_workers(pool); 208763d95a91STejun Heo ret |= maybe_create_worker(pool); 2088e22bee78STejun Heo 2089b2eb83d1SLai Jiangshan mutex_unlock(&pool->assoc_mutex); 209034a06bd6STejun Heo mutex_unlock(&pool->manager_arb); 2091e22bee78STejun Heo return ret; 2092e22bee78STejun Heo } 2093e22bee78STejun Heo 2094a62428c0STejun Heo /** 2095a62428c0STejun Heo * process_one_work - process single work 2096c34056a3STejun Heo * @worker: self 2097a62428c0STejun Heo * @work: work to process 2098a62428c0STejun Heo * 2099a62428c0STejun Heo * Process @work. This function contains all the logics necessary to 2100a62428c0STejun Heo * process a single work including synchronization against and 2101a62428c0STejun Heo * interaction with other workers on the same cpu, queueing and 2102a62428c0STejun Heo * flushing. As long as context requirement is met, any worker can 2103a62428c0STejun Heo * call this function to process a work. 2104a62428c0STejun Heo * 2105a62428c0STejun Heo * CONTEXT: 2106d565ed63STejun Heo * spin_lock_irq(pool->lock) which is released and regrabbed. 2107a62428c0STejun Heo */ 2108c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work) 2109d565ed63STejun Heo __releases(&pool->lock) 2110d565ed63STejun Heo __acquires(&pool->lock) 21111da177e4SLinus Torvalds { 2112112202d9STejun Heo struct pool_workqueue *pwq = get_work_pwq(work); 2113bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 2114112202d9STejun Heo bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; 211573f53c4aSTejun Heo int work_color; 21167e11629dSTejun Heo struct worker *collision; 21174e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP 21184e6045f1SJohannes Berg /* 2119a62428c0STejun Heo * It is permissible to free the struct work_struct from 2120a62428c0STejun Heo * inside the function that is called from it, this we need to 2121a62428c0STejun Heo * take into account for lockdep too. 
To avoid bogus "held 2122a62428c0STejun Heo * lock freed" warnings as well as problems when looking into 2123a62428c0STejun Heo * work->lockdep_map, make a copy and use that here. 21244e6045f1SJohannes Berg */ 21254d82a1deSPeter Zijlstra struct lockdep_map lockdep_map; 21264d82a1deSPeter Zijlstra 21274d82a1deSPeter Zijlstra lockdep_copy_map(&lockdep_map, &work->lockdep_map); 21284e6045f1SJohannes Berg #endif 21296fec10a1STejun Heo /* 21306fec10a1STejun Heo * Ensure we're on the correct CPU. DISASSOCIATED test is 21316fec10a1STejun Heo * necessary to avoid spurious warnings from rescuers servicing the 213224647570STejun Heo * unbound or a disassociated pool. 21336fec10a1STejun Heo */ 21345f7dabfdSLai Jiangshan WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) && 213524647570STejun Heo !(pool->flags & POOL_DISASSOCIATED) && 2136ec22ca5eSTejun Heo raw_smp_processor_id() != pool->cpu); 213725511a47STejun Heo 21387e11629dSTejun Heo /* 21397e11629dSTejun Heo * A single work shouldn't be executed concurrently by 21407e11629dSTejun Heo * multiple workers on a single cpu. Check whether anyone is 21417e11629dSTejun Heo * already processing the work. If so, defer the work to the 21427e11629dSTejun Heo * currently executing one. 21437e11629dSTejun Heo */ 2144c9e7cf27STejun Heo collision = find_worker_executing_work(pool, work); 21457e11629dSTejun Heo if (unlikely(collision)) { 21467e11629dSTejun Heo move_linked_works(work, &collision->scheduled, NULL); 21477e11629dSTejun Heo return; 21487e11629dSTejun Heo } 21491da177e4SLinus Torvalds 21508930cabaSTejun Heo /* claim and dequeue */ 21511da177e4SLinus Torvalds debug_work_deactivate(work); 2152c9e7cf27STejun Heo hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 2153c34056a3STejun Heo worker->current_work = work; 2154a2c1c57bSTejun Heo worker->current_func = work->func; 2155112202d9STejun Heo worker->current_pwq = pwq; 215673f53c4aSTejun Heo work_color = get_work_color(work); 21577a22ad75STejun Heo 2158a62428c0STejun Heo list_del_init(&work->entry); 2159a62428c0STejun Heo 2160649027d7STejun Heo /* 2161fb0e7bebSTejun Heo * CPU intensive works don't participate in concurrency 2162fb0e7bebSTejun Heo * management. They're the scheduler's responsibility. 2163fb0e7bebSTejun Heo */ 2164fb0e7bebSTejun Heo if (unlikely(cpu_intensive)) 2165fb0e7bebSTejun Heo worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); 2166fb0e7bebSTejun Heo 2167974271c4STejun Heo /* 2168d565ed63STejun Heo * Unbound pool isn't concurrency managed and work items should be 2169974271c4STejun Heo * executed ASAP. Wake up another worker if necessary. 2170974271c4STejun Heo */ 217163d95a91STejun Heo if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) 217263d95a91STejun Heo wake_up_worker(pool); 2173974271c4STejun Heo 21748930cabaSTejun Heo /* 21757c3eed5cSTejun Heo * Record the last pool and clear PENDING which should be the last 2176d565ed63STejun Heo * update to @work. Also, do this inside @pool->lock so that 217723657bb1STejun Heo * PENDING and queued state changes happen together while IRQ is 217823657bb1STejun Heo * disabled. 
21798930cabaSTejun Heo */ 21807c3eed5cSTejun Heo set_work_pool_and_clear_pending(work, pool->id); 21811da177e4SLinus Torvalds 2182d565ed63STejun Heo spin_unlock_irq(&pool->lock); 2183365970a1SDavid Howells 2184112202d9STejun Heo lock_map_acquire_read(&pwq->wq->lockdep_map); 21853295f0efSIngo Molnar lock_map_acquire(&lockdep_map); 2186e36c886aSArjan van de Ven trace_workqueue_execute_start(work); 2187a2c1c57bSTejun Heo worker->current_func(work); 2188e36c886aSArjan van de Ven /* 2189e36c886aSArjan van de Ven * While we must be careful to not use "work" after this, the trace 2190e36c886aSArjan van de Ven * point will only record its address. 2191e36c886aSArjan van de Ven */ 2192e36c886aSArjan van de Ven trace_workqueue_execute_end(work); 21933295f0efSIngo Molnar lock_map_release(&lockdep_map); 2194112202d9STejun Heo lock_map_release(&pwq->wq->lockdep_map); 21951da177e4SLinus Torvalds 2196d5abe669SPeter Zijlstra if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2197044c782cSValentin Ilie pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2198044c782cSValentin Ilie " last function: %pf\n", 2199a2c1c57bSTejun Heo current->comm, preempt_count(), task_pid_nr(current), 2200a2c1c57bSTejun Heo worker->current_func); 2201d5abe669SPeter Zijlstra debug_show_held_locks(current); 2202d5abe669SPeter Zijlstra dump_stack(); 2203d5abe669SPeter Zijlstra } 2204d5abe669SPeter Zijlstra 2205d565ed63STejun Heo spin_lock_irq(&pool->lock); 2206a62428c0STejun Heo 2207fb0e7bebSTejun Heo /* clear cpu intensive status */ 2208fb0e7bebSTejun Heo if (unlikely(cpu_intensive)) 2209fb0e7bebSTejun Heo worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2210fb0e7bebSTejun Heo 2211a62428c0STejun Heo /* we're done with it, release */ 221242f8570fSSasha Levin hash_del(&worker->hentry); 2213c34056a3STejun Heo worker->current_work = NULL; 2214a2c1c57bSTejun Heo worker->current_func = NULL; 2215112202d9STejun Heo worker->current_pwq = NULL; 2216112202d9STejun Heo pwq_dec_nr_in_flight(pwq, work_color); 22171da177e4SLinus Torvalds } 22181da177e4SLinus Torvalds 2219affee4b2STejun Heo /** 2220affee4b2STejun Heo * process_scheduled_works - process scheduled works 2221affee4b2STejun Heo * @worker: self 2222affee4b2STejun Heo * 2223affee4b2STejun Heo * Process all scheduled works. Please note that the scheduled list 2224affee4b2STejun Heo * may change while processing a work, so this function repeatedly 2225affee4b2STejun Heo * fetches a work from the top and executes it. 2226affee4b2STejun Heo * 2227affee4b2STejun Heo * CONTEXT: 2228d565ed63STejun Heo * spin_lock_irq(pool->lock) which may be released and regrabbed 2229affee4b2STejun Heo * multiple times. 2230affee4b2STejun Heo */ 2231affee4b2STejun Heo static void process_scheduled_works(struct worker *worker) 22321da177e4SLinus Torvalds { 2233affee4b2STejun Heo while (!list_empty(&worker->scheduled)) { 2234affee4b2STejun Heo struct work_struct *work = list_first_entry(&worker->scheduled, 2235a62428c0STejun Heo struct work_struct, entry); 2236c34056a3STejun Heo process_one_work(worker, work); 2237a62428c0STejun Heo } 22381da177e4SLinus Torvalds } 22391da177e4SLinus Torvalds 22404690c4abSTejun Heo /** 22414690c4abSTejun Heo * worker_thread - the worker thread function 2242c34056a3STejun Heo * @__worker: self 22434690c4abSTejun Heo * 2244706026c2STejun Heo * The worker thread function. There are NR_CPU_WORKER_POOLS dynamic pools 2245706026c2STejun Heo * of these per each cpu. 
These workers process all works regardless of 2246e22bee78STejun Heo * their specific target workqueue. The only exception is works which 2247e22bee78STejun Heo * belong to workqueues with a rescuer which will be explained in 2248e22bee78STejun Heo * rescuer_thread(). 22494690c4abSTejun Heo */ 2250c34056a3STejun Heo static int worker_thread(void *__worker) 22511da177e4SLinus Torvalds { 2252c34056a3STejun Heo struct worker *worker = __worker; 2253bd7bdd43STejun Heo struct worker_pool *pool = worker->pool; 22541da177e4SLinus Torvalds 2255e22bee78STejun Heo /* tell the scheduler that this is a workqueue worker */ 2256e22bee78STejun Heo worker->task->flags |= PF_WQ_WORKER; 2257c8e55f36STejun Heo woke_up: 2258d565ed63STejun Heo spin_lock_irq(&pool->lock); 2259affee4b2STejun Heo 22605f7dabfdSLai Jiangshan /* we are off idle list if destruction or rebind is requested */ 22615f7dabfdSLai Jiangshan if (unlikely(list_empty(&worker->entry))) { 2262d565ed63STejun Heo spin_unlock_irq(&pool->lock); 226325511a47STejun Heo 22645f7dabfdSLai Jiangshan /* if DIE is set, destruction is requested */ 226525511a47STejun Heo if (worker->flags & WORKER_DIE) { 2266e22bee78STejun Heo worker->task->flags &= ~PF_WQ_WORKER; 2267c8e55f36STejun Heo return 0; 2268c8e55f36STejun Heo } 2269c8e55f36STejun Heo 22705f7dabfdSLai Jiangshan /* otherwise, rebind */ 227125511a47STejun Heo idle_worker_rebind(worker); 227225511a47STejun Heo goto woke_up; 227325511a47STejun Heo } 227425511a47STejun Heo 2275c8e55f36STejun Heo worker_leave_idle(worker); 2276db7bccf4STejun Heo recheck: 2277e22bee78STejun Heo /* no more worker necessary? */ 227863d95a91STejun Heo if (!need_more_worker(pool)) 2279e22bee78STejun Heo goto sleep; 2280e22bee78STejun Heo 2281e22bee78STejun Heo /* do we need to manage? */ 228263d95a91STejun Heo if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2283e22bee78STejun Heo goto recheck; 2284e22bee78STejun Heo 2285c8e55f36STejun Heo /* 2286c8e55f36STejun Heo * ->scheduled list can only be filled while a worker is 2287c8e55f36STejun Heo * preparing to process a work or actually processing it. 2288c8e55f36STejun Heo * Make sure nobody diddled with it while I was sleeping. 2289c8e55f36STejun Heo */ 22906183c009STejun Heo WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2291c8e55f36STejun Heo 2292e22bee78STejun Heo /* 2293e22bee78STejun Heo * When control reaches this point, we're guaranteed to have 2294e22bee78STejun Heo * at least one idle worker or that someone else has already 2295e22bee78STejun Heo * assumed the manager role. 
2296e22bee78STejun Heo */ 2297e22bee78STejun Heo worker_clr_flags(worker, WORKER_PREP); 2298e22bee78STejun Heo 2299e22bee78STejun Heo do { 2300affee4b2STejun Heo struct work_struct *work = 2301bd7bdd43STejun Heo list_first_entry(&pool->worklist, 2302affee4b2STejun Heo struct work_struct, entry); 2303affee4b2STejun Heo 2304c8e55f36STejun Heo if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { 2305affee4b2STejun Heo /* optimization path, not strictly necessary */ 2306affee4b2STejun Heo process_one_work(worker, work); 2307affee4b2STejun Heo if (unlikely(!list_empty(&worker->scheduled))) 2308affee4b2STejun Heo process_scheduled_works(worker); 2309affee4b2STejun Heo } else { 2310c8e55f36STejun Heo move_linked_works(work, &worker->scheduled, NULL); 2311affee4b2STejun Heo process_scheduled_works(worker); 2312affee4b2STejun Heo } 231363d95a91STejun Heo } while (keep_working(pool)); 2314affee4b2STejun Heo 2315e22bee78STejun Heo worker_set_flags(worker, WORKER_PREP, false); 2316d313dd85STejun Heo sleep: 231763d95a91STejun Heo if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker)) 2318e22bee78STejun Heo goto recheck; 2319d313dd85STejun Heo 2320c8e55f36STejun Heo /* 2321d565ed63STejun Heo * pool->lock is held and there's no work to process and no need to 2322d565ed63STejun Heo * manage, sleep. Workers are woken up only while holding 2323d565ed63STejun Heo * pool->lock or from local cpu, so setting the current state 2324d565ed63STejun Heo * before releasing pool->lock is enough to prevent losing any 2325d565ed63STejun Heo * event. 2326c8e55f36STejun Heo */ 2327c8e55f36STejun Heo worker_enter_idle(worker); 2328c8e55f36STejun Heo __set_current_state(TASK_INTERRUPTIBLE); 2329d565ed63STejun Heo spin_unlock_irq(&pool->lock); 23301da177e4SLinus Torvalds schedule(); 2331c8e55f36STejun Heo goto woke_up; 23321da177e4SLinus Torvalds } 23331da177e4SLinus Torvalds 2334e22bee78STejun Heo /** 2335e22bee78STejun Heo * rescuer_thread - the rescuer thread function 2336111c225aSTejun Heo * @__rescuer: self 2337e22bee78STejun Heo * 2338e22bee78STejun Heo * Workqueue rescuer thread function. There's one rescuer for each 2339493008a8STejun Heo * workqueue which has WQ_MEM_RECLAIM set. 2340e22bee78STejun Heo * 2341706026c2STejun Heo * Regular work processing on a pool may block trying to create a new 2342e22bee78STejun Heo * worker which uses GFP_KERNEL allocation which has slight chance of 2343e22bee78STejun Heo * developing into deadlock if some works currently on the same queue 2344e22bee78STejun Heo * need to be processed to satisfy the GFP_KERNEL allocation. This is 2345e22bee78STejun Heo * the problem rescuer solves. 2346e22bee78STejun Heo * 2347706026c2STejun Heo * When such condition is possible, the pool summons rescuers of all 2348706026c2STejun Heo * workqueues which have works queued on the pool and let them process 2349e22bee78STejun Heo * those works so that forward progress can be guaranteed. 2350e22bee78STejun Heo * 2351e22bee78STejun Heo * This should happen rarely. 2352e22bee78STejun Heo */ 2353111c225aSTejun Heo static int rescuer_thread(void *__rescuer) 2354e22bee78STejun Heo { 2355111c225aSTejun Heo struct worker *rescuer = __rescuer; 2356111c225aSTejun Heo struct workqueue_struct *wq = rescuer->rescue_wq; 2357e22bee78STejun Heo struct list_head *scheduled = &rescuer->scheduled; 2358e22bee78STejun Heo 2359e22bee78STejun Heo set_user_nice(current, RESCUER_NICE_LEVEL); 2360111c225aSTejun Heo 2361111c225aSTejun Heo /* 2362111c225aSTejun Heo * Mark rescuer as worker too. 
As WORKER_PREP is never cleared, it 2363111c225aSTejun Heo * doesn't participate in concurrency management. 2364111c225aSTejun Heo */ 2365111c225aSTejun Heo rescuer->task->flags |= PF_WQ_WORKER; 2366e22bee78STejun Heo repeat: 2367e22bee78STejun Heo set_current_state(TASK_INTERRUPTIBLE); 23681da177e4SLinus Torvalds 2369412d32e6SMike Galbraith if (kthread_should_stop()) { 2370412d32e6SMike Galbraith __set_current_state(TASK_RUNNING); 2371111c225aSTejun Heo rescuer->task->flags &= ~PF_WQ_WORKER; 2372e22bee78STejun Heo return 0; 2373412d32e6SMike Galbraith } 23741da177e4SLinus Torvalds 2375493a1724STejun Heo /* see whether any pwq is asking for help */ 2376493a1724STejun Heo spin_lock_irq(&workqueue_lock); 2377493a1724STejun Heo 2378493a1724STejun Heo while (!list_empty(&wq->maydays)) { 2379493a1724STejun Heo struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2380493a1724STejun Heo struct pool_workqueue, mayday_node); 2381112202d9STejun Heo struct worker_pool *pool = pwq->pool; 2382e22bee78STejun Heo struct work_struct *work, *n; 2383e22bee78STejun Heo 2384e22bee78STejun Heo __set_current_state(TASK_RUNNING); 2385493a1724STejun Heo list_del_init(&pwq->mayday_node); 2386493a1724STejun Heo 2387493a1724STejun Heo spin_unlock_irq(&workqueue_lock); 2388e22bee78STejun Heo 2389e22bee78STejun Heo /* migrate to the target cpu if possible */ 2390f36dc67bSLai Jiangshan worker_maybe_bind_and_lock(pool); 2391b3104104SLai Jiangshan rescuer->pool = pool; 2392e22bee78STejun Heo 2393e22bee78STejun Heo /* 2394e22bee78STejun Heo * Slurp in all works issued via this workqueue and 2395e22bee78STejun Heo * process'em. 2396e22bee78STejun Heo */ 23976183c009STejun Heo WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 2398bd7bdd43STejun Heo list_for_each_entry_safe(work, n, &pool->worklist, entry) 2399112202d9STejun Heo if (get_work_pwq(work) == pwq) 2400e22bee78STejun Heo move_linked_works(work, scheduled, &n); 2401e22bee78STejun Heo 2402e22bee78STejun Heo process_scheduled_works(rescuer); 24037576958aSTejun Heo 24047576958aSTejun Heo /* 2405d565ed63STejun Heo * Leave this pool. If keep_working() is %true, notify a 24067576958aSTejun Heo * regular worker; otherwise, we end up with 0 concurrency 24077576958aSTejun Heo * and stalling the execution. 
24087576958aSTejun Heo */ 240963d95a91STejun Heo if (keep_working(pool)) 241063d95a91STejun Heo wake_up_worker(pool); 24117576958aSTejun Heo 2412b3104104SLai Jiangshan rescuer->pool = NULL; 2413493a1724STejun Heo spin_unlock(&pool->lock); 2414493a1724STejun Heo spin_lock(&workqueue_lock); 24151da177e4SLinus Torvalds } 24161da177e4SLinus Torvalds 2417493a1724STejun Heo spin_unlock_irq(&workqueue_lock); 2418493a1724STejun Heo 2419111c225aSTejun Heo /* rescuers should never participate in concurrency management */ 2420111c225aSTejun Heo WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2421e22bee78STejun Heo schedule(); 2422e22bee78STejun Heo goto repeat; 24231da177e4SLinus Torvalds } 24241da177e4SLinus Torvalds 2425fc2e4d70SOleg Nesterov struct wq_barrier { 2426fc2e4d70SOleg Nesterov struct work_struct work; 2427fc2e4d70SOleg Nesterov struct completion done; 2428fc2e4d70SOleg Nesterov }; 2429fc2e4d70SOleg Nesterov 2430fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work) 2431fc2e4d70SOleg Nesterov { 2432fc2e4d70SOleg Nesterov struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2433fc2e4d70SOleg Nesterov complete(&barr->done); 2434fc2e4d70SOleg Nesterov } 2435fc2e4d70SOleg Nesterov 24364690c4abSTejun Heo /** 24374690c4abSTejun Heo * insert_wq_barrier - insert a barrier work 2438112202d9STejun Heo * @pwq: pwq to insert barrier into 24394690c4abSTejun Heo * @barr: wq_barrier to insert 2440affee4b2STejun Heo * @target: target work to attach @barr to 2441affee4b2STejun Heo * @worker: worker currently executing @target, NULL if @target is not executing 24424690c4abSTejun Heo * 2443affee4b2STejun Heo * @barr is linked to @target such that @barr is completed only after 2444affee4b2STejun Heo * @target finishes execution. Please note that the ordering 2445affee4b2STejun Heo * guarantee is observed only with respect to @target and on the local 2446affee4b2STejun Heo * cpu. 2447affee4b2STejun Heo * 2448affee4b2STejun Heo * Currently, a queued barrier can't be canceled. This is because 2449affee4b2STejun Heo * try_to_grab_pending() can't determine whether the work to be 2450affee4b2STejun Heo * grabbed is at the head of the queue and thus can't clear LINKED 2451affee4b2STejun Heo * flag of the previous work while there must be a valid next work 2452affee4b2STejun Heo * after a work with LINKED flag set. 2453affee4b2STejun Heo * 2454affee4b2STejun Heo * Note that when @worker is non-NULL, @target may be modified 2455112202d9STejun Heo * underneath us, so we can't reliably determine pwq from @target. 24564690c4abSTejun Heo * 24574690c4abSTejun Heo * CONTEXT: 2458d565ed63STejun Heo * spin_lock_irq(pool->lock). 24594690c4abSTejun Heo */ 2460112202d9STejun Heo static void insert_wq_barrier(struct pool_workqueue *pwq, 2461affee4b2STejun Heo struct wq_barrier *barr, 2462affee4b2STejun Heo struct work_struct *target, struct worker *worker) 2463fc2e4d70SOleg Nesterov { 2464affee4b2STejun Heo struct list_head *head; 2465affee4b2STejun Heo unsigned int linked = 0; 2466affee4b2STejun Heo 2467dc186ad7SThomas Gleixner /* 2468d565ed63STejun Heo * debugobject calls are safe here even with pool->lock locked 2469dc186ad7SThomas Gleixner * as we know for sure that this will not trigger any of the 2470dc186ad7SThomas Gleixner * checks and call back into the fixup functions where we 2471dc186ad7SThomas Gleixner * might deadlock. 
2472dc186ad7SThomas Gleixner */ 2473ca1cab37SAndrew Morton INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 247422df02bbSTejun Heo __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 2475fc2e4d70SOleg Nesterov init_completion(&barr->done); 247683c22520SOleg Nesterov 2477affee4b2STejun Heo /* 2478affee4b2STejun Heo * If @target is currently being executed, schedule the 2479affee4b2STejun Heo * barrier to the worker; otherwise, put it after @target. 2480affee4b2STejun Heo */ 2481affee4b2STejun Heo if (worker) 2482affee4b2STejun Heo head = worker->scheduled.next; 2483affee4b2STejun Heo else { 2484affee4b2STejun Heo unsigned long *bits = work_data_bits(target); 2485affee4b2STejun Heo 2486affee4b2STejun Heo head = target->entry.next; 2487affee4b2STejun Heo /* there can already be other linked works, inherit and set */ 2488affee4b2STejun Heo linked = *bits & WORK_STRUCT_LINKED; 2489affee4b2STejun Heo __set_bit(WORK_STRUCT_LINKED_BIT, bits); 2490affee4b2STejun Heo } 2491affee4b2STejun Heo 2492dc186ad7SThomas Gleixner debug_work_activate(&barr->work); 2493112202d9STejun Heo insert_work(pwq, &barr->work, head, 2494affee4b2STejun Heo work_color_to_flags(WORK_NO_COLOR) | linked); 2495fc2e4d70SOleg Nesterov } 2496fc2e4d70SOleg Nesterov 249773f53c4aSTejun Heo /** 2498112202d9STejun Heo * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 249973f53c4aSTejun Heo * @wq: workqueue being flushed 250073f53c4aSTejun Heo * @flush_color: new flush color, < 0 for no-op 250173f53c4aSTejun Heo * @work_color: new work color, < 0 for no-op 250273f53c4aSTejun Heo * 2503112202d9STejun Heo * Prepare pwqs for workqueue flushing. 250473f53c4aSTejun Heo * 2505112202d9STejun Heo * If @flush_color is non-negative, flush_color on all pwqs should be 2506112202d9STejun Heo * -1. If no pwq has in-flight commands at the specified color, all 2507112202d9STejun Heo * pwq->flush_color's stay at -1 and %false is returned. If any pwq 2508112202d9STejun Heo * has in flight commands, its pwq->flush_color is set to 2509112202d9STejun Heo * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 251073f53c4aSTejun Heo * wakeup logic is armed and %true is returned. 251173f53c4aSTejun Heo * 251273f53c4aSTejun Heo * The caller should have initialized @wq->first_flusher prior to 251373f53c4aSTejun Heo * calling this function with non-negative @flush_color. If 251473f53c4aSTejun Heo * @flush_color is negative, no flush color update is done and %false 251573f53c4aSTejun Heo * is returned. 251673f53c4aSTejun Heo * 2517112202d9STejun Heo * If @work_color is non-negative, all pwqs should have the same 251873f53c4aSTejun Heo * work_color which is previous to @work_color and all will be 251973f53c4aSTejun Heo * advanced to @work_color. 252073f53c4aSTejun Heo * 252173f53c4aSTejun Heo * CONTEXT: 252273f53c4aSTejun Heo * mutex_lock(wq->flush_mutex). 252373f53c4aSTejun Heo * 252473f53c4aSTejun Heo * RETURNS: 252573f53c4aSTejun Heo * %true if @flush_color >= 0 and there's something to flush. %false 252673f53c4aSTejun Heo * otherwise. 
252773f53c4aSTejun Heo */ 2528112202d9STejun Heo static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 252973f53c4aSTejun Heo int flush_color, int work_color) 25301da177e4SLinus Torvalds { 253173f53c4aSTejun Heo bool wait = false; 253249e3cf44STejun Heo struct pool_workqueue *pwq; 25331da177e4SLinus Torvalds 253473f53c4aSTejun Heo if (flush_color >= 0) { 25356183c009STejun Heo WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 2536112202d9STejun Heo atomic_set(&wq->nr_pwqs_to_flush, 1); 2537dc186ad7SThomas Gleixner } 253814441960SOleg Nesterov 253976af4d93STejun Heo local_irq_disable(); 254076af4d93STejun Heo 254149e3cf44STejun Heo for_each_pwq(pwq, wq) { 2542112202d9STejun Heo struct worker_pool *pool = pwq->pool; 25431da177e4SLinus Torvalds 254476af4d93STejun Heo spin_lock(&pool->lock); 254573f53c4aSTejun Heo 254673f53c4aSTejun Heo if (flush_color >= 0) { 25476183c009STejun Heo WARN_ON_ONCE(pwq->flush_color != -1); 254873f53c4aSTejun Heo 2549112202d9STejun Heo if (pwq->nr_in_flight[flush_color]) { 2550112202d9STejun Heo pwq->flush_color = flush_color; 2551112202d9STejun Heo atomic_inc(&wq->nr_pwqs_to_flush); 255273f53c4aSTejun Heo wait = true; 25531da177e4SLinus Torvalds } 255473f53c4aSTejun Heo } 255573f53c4aSTejun Heo 255673f53c4aSTejun Heo if (work_color >= 0) { 25576183c009STejun Heo WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 2558112202d9STejun Heo pwq->work_color = work_color; 255973f53c4aSTejun Heo } 256073f53c4aSTejun Heo 256176af4d93STejun Heo spin_unlock(&pool->lock); 25621da177e4SLinus Torvalds } 25631da177e4SLinus Torvalds 256476af4d93STejun Heo local_irq_enable(); 256576af4d93STejun Heo 2566112202d9STejun Heo if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 256773f53c4aSTejun Heo complete(&wq->first_flusher->done); 256873f53c4aSTejun Heo 256973f53c4aSTejun Heo return wait; 257083c22520SOleg Nesterov } 25711da177e4SLinus Torvalds 25720fcb78c2SRolf Eike Beer /** 25731da177e4SLinus Torvalds * flush_workqueue - ensure that any scheduled work has run to completion. 25740fcb78c2SRolf Eike Beer * @wq: workqueue to flush 25751da177e4SLinus Torvalds * 25761da177e4SLinus Torvalds * Forces execution of the workqueue and blocks until its completion. 25771da177e4SLinus Torvalds * This is typically used in driver shutdown handlers. 25781da177e4SLinus Torvalds * 2579fc2e4d70SOleg Nesterov * We sleep until all works which were queued on entry have been handled, 2580fc2e4d70SOleg Nesterov * but we are not livelocked by new incoming ones. 25811da177e4SLinus Torvalds */ 25827ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq) 25831da177e4SLinus Torvalds { 258473f53c4aSTejun Heo struct wq_flusher this_flusher = { 258573f53c4aSTejun Heo .list = LIST_HEAD_INIT(this_flusher.list), 258673f53c4aSTejun Heo .flush_color = -1, 258773f53c4aSTejun Heo .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), 258873f53c4aSTejun Heo }; 258973f53c4aSTejun Heo int next_color; 2590b1f4ec17SOleg Nesterov 25913295f0efSIngo Molnar lock_map_acquire(&wq->lockdep_map); 25923295f0efSIngo Molnar lock_map_release(&wq->lockdep_map); 259373f53c4aSTejun Heo 259473f53c4aSTejun Heo mutex_lock(&wq->flush_mutex); 259573f53c4aSTejun Heo 259673f53c4aSTejun Heo /* 259773f53c4aSTejun Heo * Start-to-wait phase 259873f53c4aSTejun Heo */ 259973f53c4aSTejun Heo next_color = work_next_color(wq->work_color); 260073f53c4aSTejun Heo 260173f53c4aSTejun Heo if (next_color != wq->flush_color) { 260273f53c4aSTejun Heo /* 260373f53c4aSTejun Heo * Color space is not full. 
The current work_color 260473f53c4aSTejun Heo * becomes our flush_color and work_color is advanced 260573f53c4aSTejun Heo * by one. 260673f53c4aSTejun Heo */ 26076183c009STejun Heo WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 260873f53c4aSTejun Heo this_flusher.flush_color = wq->work_color; 260973f53c4aSTejun Heo wq->work_color = next_color; 261073f53c4aSTejun Heo 261173f53c4aSTejun Heo if (!wq->first_flusher) { 261273f53c4aSTejun Heo /* no flush in progress, become the first flusher */ 26136183c009STejun Heo WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 261473f53c4aSTejun Heo 261573f53c4aSTejun Heo wq->first_flusher = &this_flusher; 261673f53c4aSTejun Heo 2617112202d9STejun Heo if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 261873f53c4aSTejun Heo wq->work_color)) { 261973f53c4aSTejun Heo /* nothing to flush, done */ 262073f53c4aSTejun Heo wq->flush_color = next_color; 262173f53c4aSTejun Heo wq->first_flusher = NULL; 262273f53c4aSTejun Heo goto out_unlock; 262373f53c4aSTejun Heo } 262473f53c4aSTejun Heo } else { 262573f53c4aSTejun Heo /* wait in queue */ 26266183c009STejun Heo WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 262773f53c4aSTejun Heo list_add_tail(&this_flusher.list, &wq->flusher_queue); 2628112202d9STejun Heo flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 262973f53c4aSTejun Heo } 263073f53c4aSTejun Heo } else { 263173f53c4aSTejun Heo /* 263273f53c4aSTejun Heo * Oops, color space is full, wait on overflow queue. 263373f53c4aSTejun Heo * The next flush completion will assign us 263473f53c4aSTejun Heo * flush_color and transfer to flusher_queue. 263573f53c4aSTejun Heo */ 263673f53c4aSTejun Heo list_add_tail(&this_flusher.list, &wq->flusher_overflow); 263773f53c4aSTejun Heo } 263873f53c4aSTejun Heo 263973f53c4aSTejun Heo mutex_unlock(&wq->flush_mutex); 264073f53c4aSTejun Heo 264173f53c4aSTejun Heo wait_for_completion(&this_flusher.done); 264273f53c4aSTejun Heo 264373f53c4aSTejun Heo /* 264473f53c4aSTejun Heo * Wake-up-and-cascade phase 264573f53c4aSTejun Heo * 264673f53c4aSTejun Heo * First flushers are responsible for cascading flushes and 264773f53c4aSTejun Heo * handling overflow. Non-first flushers can simply return. 
264873f53c4aSTejun Heo */ 264973f53c4aSTejun Heo if (wq->first_flusher != &this_flusher) 265073f53c4aSTejun Heo return; 265173f53c4aSTejun Heo 265273f53c4aSTejun Heo mutex_lock(&wq->flush_mutex); 265373f53c4aSTejun Heo 26544ce48b37STejun Heo /* we might have raced, check again with mutex held */ 26554ce48b37STejun Heo if (wq->first_flusher != &this_flusher) 26564ce48b37STejun Heo goto out_unlock; 26574ce48b37STejun Heo 265873f53c4aSTejun Heo wq->first_flusher = NULL; 265973f53c4aSTejun Heo 26606183c009STejun Heo WARN_ON_ONCE(!list_empty(&this_flusher.list)); 26616183c009STejun Heo WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 266273f53c4aSTejun Heo 266373f53c4aSTejun Heo while (true) { 266473f53c4aSTejun Heo struct wq_flusher *next, *tmp; 266573f53c4aSTejun Heo 266673f53c4aSTejun Heo /* complete all the flushers sharing the current flush color */ 266773f53c4aSTejun Heo list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 266873f53c4aSTejun Heo if (next->flush_color != wq->flush_color) 266973f53c4aSTejun Heo break; 267073f53c4aSTejun Heo list_del_init(&next->list); 267173f53c4aSTejun Heo complete(&next->done); 267273f53c4aSTejun Heo } 267373f53c4aSTejun Heo 26746183c009STejun Heo WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 267573f53c4aSTejun Heo wq->flush_color != work_next_color(wq->work_color)); 267673f53c4aSTejun Heo 267773f53c4aSTejun Heo /* this flush_color is finished, advance by one */ 267873f53c4aSTejun Heo wq->flush_color = work_next_color(wq->flush_color); 267973f53c4aSTejun Heo 268073f53c4aSTejun Heo /* one color has been freed, handle overflow queue */ 268173f53c4aSTejun Heo if (!list_empty(&wq->flusher_overflow)) { 268273f53c4aSTejun Heo /* 268373f53c4aSTejun Heo * Assign the same color to all overflowed 268473f53c4aSTejun Heo * flushers, advance work_color and append to 268573f53c4aSTejun Heo * flusher_queue. This is the start-to-wait 268673f53c4aSTejun Heo * phase for these overflowed flushers. 268773f53c4aSTejun Heo */ 268873f53c4aSTejun Heo list_for_each_entry(tmp, &wq->flusher_overflow, list) 268973f53c4aSTejun Heo tmp->flush_color = wq->work_color; 269073f53c4aSTejun Heo 269173f53c4aSTejun Heo wq->work_color = work_next_color(wq->work_color); 269273f53c4aSTejun Heo 269373f53c4aSTejun Heo list_splice_tail_init(&wq->flusher_overflow, 269473f53c4aSTejun Heo &wq->flusher_queue); 2695112202d9STejun Heo flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 269673f53c4aSTejun Heo } 269773f53c4aSTejun Heo 269873f53c4aSTejun Heo if (list_empty(&wq->flusher_queue)) { 26996183c009STejun Heo WARN_ON_ONCE(wq->flush_color != wq->work_color); 270073f53c4aSTejun Heo break; 270173f53c4aSTejun Heo } 270273f53c4aSTejun Heo 270373f53c4aSTejun Heo /* 270473f53c4aSTejun Heo * Need to flush more colors. Make the next flusher 2705112202d9STejun Heo * the new first flusher and arm pwqs. 270673f53c4aSTejun Heo */ 27076183c009STejun Heo WARN_ON_ONCE(wq->flush_color == wq->work_color); 27086183c009STejun Heo WARN_ON_ONCE(wq->flush_color != next->flush_color); 270973f53c4aSTejun Heo 271073f53c4aSTejun Heo list_del_init(&next->list); 271173f53c4aSTejun Heo wq->first_flusher = next; 271273f53c4aSTejun Heo 2713112202d9STejun Heo if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 271473f53c4aSTejun Heo break; 271573f53c4aSTejun Heo 271673f53c4aSTejun Heo /* 271773f53c4aSTejun Heo * Meh... this color is already done, clear first 271873f53c4aSTejun Heo * flusher and repeat cascading. 
271973f53c4aSTejun Heo */ 272073f53c4aSTejun Heo wq->first_flusher = NULL; 272173f53c4aSTejun Heo } 272273f53c4aSTejun Heo 272373f53c4aSTejun Heo out_unlock: 272473f53c4aSTejun Heo mutex_unlock(&wq->flush_mutex); 27251da177e4SLinus Torvalds } 2726ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue); 27271da177e4SLinus Torvalds 27289c5a2ba7STejun Heo /** 27299c5a2ba7STejun Heo * drain_workqueue - drain a workqueue 27309c5a2ba7STejun Heo * @wq: workqueue to drain 27319c5a2ba7STejun Heo * 27329c5a2ba7STejun Heo * Wait until the workqueue becomes empty. While draining is in progress, 27339c5a2ba7STejun Heo * only chain queueing is allowed. IOW, only currently pending or running 27349c5a2ba7STejun Heo * work items on @wq can queue further work items on it. @wq is flushed 27359c5a2ba7STejun Heo * repeatedly until it becomes empty. The number of flushes is determined 27369c5a2ba7STejun Heo * by the depth of chaining and should be relatively short. Whine if it 27379c5a2ba7STejun Heo * takes too long. 27389c5a2ba7STejun Heo */ 27399c5a2ba7STejun Heo void drain_workqueue(struct workqueue_struct *wq) 27409c5a2ba7STejun Heo { 27419c5a2ba7STejun Heo unsigned int flush_cnt = 0; 274249e3cf44STejun Heo struct pool_workqueue *pwq; 27439c5a2ba7STejun Heo 27449c5a2ba7STejun Heo /* 27459c5a2ba7STejun Heo * __queue_work() needs to test whether there are drainers, is much 27469c5a2ba7STejun Heo * hotter than drain_workqueue() and already looks at @wq->flags. 27479c5a2ba7STejun Heo * Use WQ_DRAINING so that queue doesn't have to check nr_drainers. 27489c5a2ba7STejun Heo */ 2749e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 27509c5a2ba7STejun Heo if (!wq->nr_drainers++) 27519c5a2ba7STejun Heo wq->flags |= WQ_DRAINING; 2752e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 27539c5a2ba7STejun Heo reflush: 27549c5a2ba7STejun Heo flush_workqueue(wq); 27559c5a2ba7STejun Heo 275676af4d93STejun Heo local_irq_disable(); 275776af4d93STejun Heo 275849e3cf44STejun Heo for_each_pwq(pwq, wq) { 2759fa2563e4SThomas Tuttle bool drained; 27609c5a2ba7STejun Heo 276176af4d93STejun Heo spin_lock(&pwq->pool->lock); 2762112202d9STejun Heo drained = !pwq->nr_active && list_empty(&pwq->delayed_works); 276376af4d93STejun Heo spin_unlock(&pwq->pool->lock); 2764fa2563e4SThomas Tuttle 2765fa2563e4SThomas Tuttle if (drained) 27669c5a2ba7STejun Heo continue; 27679c5a2ba7STejun Heo 27689c5a2ba7STejun Heo if (++flush_cnt == 10 || 27699c5a2ba7STejun Heo (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 2770044c782cSValentin Ilie pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n", 27719c5a2ba7STejun Heo wq->name, flush_cnt); 277276af4d93STejun Heo 277376af4d93STejun Heo local_irq_enable(); 27749c5a2ba7STejun Heo goto reflush; 27759c5a2ba7STejun Heo } 27769c5a2ba7STejun Heo 277776af4d93STejun Heo spin_lock(&workqueue_lock); 27789c5a2ba7STejun Heo if (!--wq->nr_drainers) 27799c5a2ba7STejun Heo wq->flags &= ~WQ_DRAINING; 278076af4d93STejun Heo spin_unlock(&workqueue_lock); 278176af4d93STejun Heo 278276af4d93STejun Heo local_irq_enable(); 27839c5a2ba7STejun Heo } 27849c5a2ba7STejun Heo EXPORT_SYMBOL_GPL(drain_workqueue); 27859c5a2ba7STejun Heo 2786606a5020STejun Heo static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 2787baf59022STejun Heo { 2788baf59022STejun Heo struct worker *worker = NULL; 2789c9e7cf27STejun Heo struct worker_pool *pool; 2790112202d9STejun Heo struct pool_workqueue *pwq; 2791baf59022STejun Heo 2792baf59022STejun Heo might_sleep(); 2793baf59022STejun Heo 2794fa1b54e6STejun
Heo local_irq_disable(); 2795fa1b54e6STejun Heo pool = get_work_pool(work); 2796fa1b54e6STejun Heo if (!pool) { 2797fa1b54e6STejun Heo local_irq_enable(); 2798fa1b54e6STejun Heo return false; 2799fa1b54e6STejun Heo } 2800fa1b54e6STejun Heo 2801fa1b54e6STejun Heo spin_lock(&pool->lock); 28020b3dae68SLai Jiangshan /* see the comment in try_to_grab_pending() with the same code */ 2803112202d9STejun Heo pwq = get_work_pwq(work); 2804112202d9STejun Heo if (pwq) { 2805112202d9STejun Heo if (unlikely(pwq->pool != pool)) 2806baf59022STejun Heo goto already_gone; 2807606a5020STejun Heo } else { 2808c9e7cf27STejun Heo worker = find_worker_executing_work(pool, work); 2809baf59022STejun Heo if (!worker) 2810baf59022STejun Heo goto already_gone; 2811112202d9STejun Heo pwq = worker->current_pwq; 2812606a5020STejun Heo } 2813baf59022STejun Heo 2814112202d9STejun Heo insert_wq_barrier(pwq, barr, work, worker); 2815d565ed63STejun Heo spin_unlock_irq(&pool->lock); 2816baf59022STejun Heo 2817e159489bSTejun Heo /* 2818e159489bSTejun Heo * If @max_active is 1 or rescuer is in use, flushing another work 2819e159489bSTejun Heo * item on the same workqueue may lead to deadlock. Make sure the 2820e159489bSTejun Heo * flusher is not running on the same workqueue by verifying write 2821e159489bSTejun Heo * access. 2822e159489bSTejun Heo */ 2823493008a8STejun Heo if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) 2824112202d9STejun Heo lock_map_acquire(&pwq->wq->lockdep_map); 2825e159489bSTejun Heo else 2826112202d9STejun Heo lock_map_acquire_read(&pwq->wq->lockdep_map); 2827112202d9STejun Heo lock_map_release(&pwq->wq->lockdep_map); 2828e159489bSTejun Heo 2829baf59022STejun Heo return true; 2830baf59022STejun Heo already_gone: 2831d565ed63STejun Heo spin_unlock_irq(&pool->lock); 2832baf59022STejun Heo return false; 2833baf59022STejun Heo } 2834baf59022STejun Heo 2835db700897SOleg Nesterov /** 2836401a8d04STejun Heo * flush_work - wait for a work to finish executing the last queueing instance 2837401a8d04STejun Heo * @work: the work to flush 2838db700897SOleg Nesterov * 2839606a5020STejun Heo * Wait until @work has finished execution. @work is guaranteed to be idle 2840606a5020STejun Heo * on return if it hasn't been requeued since flush started. 2841401a8d04STejun Heo * 2842401a8d04STejun Heo * RETURNS: 2843401a8d04STejun Heo * %true if flush_work() waited for the work to finish execution, 2844401a8d04STejun Heo * %false if it was already idle. 
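 *
 * Illustrative usage sketch (my_work_fn and my_work are hypothetical
 * names, not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work executed\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);
 *	flush_work(&my_work);
 *
 * flush_work() here returns only after my_work_fn() has finished for that
 * queueing instance.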
2845db700897SOleg Nesterov */ 2846401a8d04STejun Heo bool flush_work(struct work_struct *work) 2847db700897SOleg Nesterov { 2848db700897SOleg Nesterov struct wq_barrier barr; 2849db700897SOleg Nesterov 28500976dfc1SStephen Boyd lock_map_acquire(&work->lockdep_map); 28510976dfc1SStephen Boyd lock_map_release(&work->lockdep_map); 28520976dfc1SStephen Boyd 2853606a5020STejun Heo if (start_flush_work(work, &barr)) { 2854db700897SOleg Nesterov wait_for_completion(&barr.done); 2855dc186ad7SThomas Gleixner destroy_work_on_stack(&barr.work); 2856401a8d04STejun Heo return true; 2857606a5020STejun Heo } else { 2858401a8d04STejun Heo return false; 2859db700897SOleg Nesterov } 2860606a5020STejun Heo } 2861db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work); 2862db700897SOleg Nesterov 286336e227d2STejun Heo static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 2864401a8d04STejun Heo { 2865bbb68dfaSTejun Heo unsigned long flags; 28661f1f642eSOleg Nesterov int ret; 28671f1f642eSOleg Nesterov 28681f1f642eSOleg Nesterov do { 2869bbb68dfaSTejun Heo ret = try_to_grab_pending(work, is_dwork, &flags); 2870bbb68dfaSTejun Heo /* 2871bbb68dfaSTejun Heo * If someone else is canceling, wait for the same event it 2872bbb68dfaSTejun Heo * would be waiting for before retrying. 2873bbb68dfaSTejun Heo */ 2874bbb68dfaSTejun Heo if (unlikely(ret == -ENOENT)) 2875606a5020STejun Heo flush_work(work); 28761f1f642eSOleg Nesterov } while (unlikely(ret < 0)); 28771f1f642eSOleg Nesterov 2878bbb68dfaSTejun Heo /* tell other tasks trying to grab @work to back off */ 2879bbb68dfaSTejun Heo mark_work_canceling(work); 2880bbb68dfaSTejun Heo local_irq_restore(flags); 2881bbb68dfaSTejun Heo 2882606a5020STejun Heo flush_work(work); 28837a22ad75STejun Heo clear_work_data(work); 28841f1f642eSOleg Nesterov return ret; 28851f1f642eSOleg Nesterov } 28861f1f642eSOleg Nesterov 28876e84d644SOleg Nesterov /** 2888401a8d04STejun Heo * cancel_work_sync - cancel a work and wait for it to finish 2889401a8d04STejun Heo * @work: the work to cancel 28906e84d644SOleg Nesterov * 2891401a8d04STejun Heo * Cancel @work and wait for its execution to finish. This function 2892401a8d04STejun Heo * can be used even if the work re-queues itself or migrates to 2893401a8d04STejun Heo * another workqueue. On return from this function, @work is 2894401a8d04STejun Heo * guaranteed to be not pending or executing on any CPU. 28951f1f642eSOleg Nesterov * 2896401a8d04STejun Heo * cancel_work_sync(&delayed_work->work) must not be used for 2897401a8d04STejun Heo * delayed_work's. Use cancel_delayed_work_sync() instead. 28986e84d644SOleg Nesterov * 2899401a8d04STejun Heo * The caller must ensure that the workqueue on which @work was last 29006e84d644SOleg Nesterov * queued can't be destroyed before this function returns. 2901401a8d04STejun Heo * 2902401a8d04STejun Heo * RETURNS: 2903401a8d04STejun Heo * %true if @work was pending, %false otherwise. 
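 *
 * Illustrative teardown sketch (struct my_dev and its update_work member
 * are hypothetical):
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->update_work);
 *	}
 *
 * after which update_work is neither pending nor running, even if its
 * handler used to requeue itself.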
29046e84d644SOleg Nesterov */ 2905401a8d04STejun Heo bool cancel_work_sync(struct work_struct *work) 29066e84d644SOleg Nesterov { 290736e227d2STejun Heo return __cancel_work_timer(work, false); 2908b89deed3SOleg Nesterov } 290928e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync); 2910b89deed3SOleg Nesterov 29116e84d644SOleg Nesterov /** 2912401a8d04STejun Heo * flush_delayed_work - wait for a dwork to finish executing the last queueing 2913401a8d04STejun Heo * @dwork: the delayed work to flush 29146e84d644SOleg Nesterov * 2915401a8d04STejun Heo * Delayed timer is cancelled and the pending work is queued for 2916401a8d04STejun Heo * immediate execution. Like flush_work(), this function only 2917401a8d04STejun Heo * considers the last queueing instance of @dwork. 29181f1f642eSOleg Nesterov * 2919401a8d04STejun Heo * RETURNS: 2920401a8d04STejun Heo * %true if flush_work() waited for the work to finish execution, 2921401a8d04STejun Heo * %false if it was already idle. 29226e84d644SOleg Nesterov */ 2923401a8d04STejun Heo bool flush_delayed_work(struct delayed_work *dwork) 2924401a8d04STejun Heo { 29258930cabaSTejun Heo local_irq_disable(); 2926401a8d04STejun Heo if (del_timer_sync(&dwork->timer)) 292760c057bcSLai Jiangshan __queue_work(dwork->cpu, dwork->wq, &dwork->work); 29288930cabaSTejun Heo local_irq_enable(); 2929401a8d04STejun Heo return flush_work(&dwork->work); 2930401a8d04STejun Heo } 2931401a8d04STejun Heo EXPORT_SYMBOL(flush_delayed_work); 2932401a8d04STejun Heo 2933401a8d04STejun Heo /** 293457b30ae7STejun Heo * cancel_delayed_work - cancel a delayed work 293557b30ae7STejun Heo * @dwork: delayed_work to cancel 293609383498STejun Heo * 293757b30ae7STejun Heo * Kill off a pending delayed_work. Returns %true if @dwork was pending 293857b30ae7STejun Heo * and canceled; %false if it wasn't pending. Note that the work callback 293957b30ae7STejun Heo * function may still be running on return, unless it returns %true and the 294057b30ae7STejun Heo * work doesn't re-arm itself. Explicitly flush or use 294157b30ae7STejun Heo * cancel_delayed_work_sync() to wait on it. 294209383498STejun Heo * 294357b30ae7STejun Heo * This function is safe to call from any context including IRQ handler. 294409383498STejun Heo */ 294557b30ae7STejun Heo bool cancel_delayed_work(struct delayed_work *dwork) 294609383498STejun Heo { 294757b30ae7STejun Heo unsigned long flags; 294857b30ae7STejun Heo int ret; 294957b30ae7STejun Heo 295057b30ae7STejun Heo do { 295157b30ae7STejun Heo ret = try_to_grab_pending(&dwork->work, true, &flags); 295257b30ae7STejun Heo } while (unlikely(ret == -EAGAIN)); 295357b30ae7STejun Heo 295457b30ae7STejun Heo if (unlikely(ret < 0)) 295557b30ae7STejun Heo return false; 295657b30ae7STejun Heo 29577c3eed5cSTejun Heo set_work_pool_and_clear_pending(&dwork->work, 29587c3eed5cSTejun Heo get_work_pool_id(&dwork->work)); 295957b30ae7STejun Heo local_irq_restore(flags); 2960c0158ca6SDan Magenheimer return ret; 296109383498STejun Heo } 296257b30ae7STejun Heo EXPORT_SYMBOL(cancel_delayed_work); 296309383498STejun Heo 296409383498STejun Heo /** 2965401a8d04STejun Heo * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 2966401a8d04STejun Heo * @dwork: the delayed work to cancel 2967401a8d04STejun Heo * 2968401a8d04STejun Heo * This is cancel_work_sync() for delayed works. 2969401a8d04STejun Heo * 2970401a8d04STejun Heo * RETURNS: 2971401a8d04STejun Heo * %true if @dwork was pending, %false otherwise.
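 *
 * Illustrative sketch of stopping a self-rearming poller (all names are
 * hypothetical):
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  poll_work.work);
 *
 *		my_dev_poll_hw(dev);
 *		schedule_delayed_work(&dev->poll_work, HZ);
 *	}
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 *
 * Once the _sync variant returns, poll_work is neither pending nor
 * running, even though its handler re-arms itself.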
2972401a8d04STejun Heo */ 2973401a8d04STejun Heo bool cancel_delayed_work_sync(struct delayed_work *dwork) 29746e84d644SOleg Nesterov { 297536e227d2STejun Heo return __cancel_work_timer(&dwork->work, true); 29766e84d644SOleg Nesterov } 2977f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync); 29781da177e4SLinus Torvalds 29790fcb78c2SRolf Eike Beer /** 2980c1a220e7SZhang Rui * schedule_work_on - put work task on a specific cpu 2981c1a220e7SZhang Rui * @cpu: cpu to put the work task on 2982c1a220e7SZhang Rui * @work: job to be done 2983c1a220e7SZhang Rui * 2984c1a220e7SZhang Rui * This puts a job on a specific cpu 2985c1a220e7SZhang Rui */ 2986d4283e93STejun Heo bool schedule_work_on(int cpu, struct work_struct *work) 2987c1a220e7SZhang Rui { 2988d320c038STejun Heo return queue_work_on(cpu, system_wq, work); 2989c1a220e7SZhang Rui } 2990c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on); 2991c1a220e7SZhang Rui 29920fcb78c2SRolf Eike Beer /** 2993ae90dd5dSDave Jones * schedule_work - put work task in global workqueue 29941da177e4SLinus Torvalds * @work: job to be done 29951da177e4SLinus Torvalds * 2996d4283e93STejun Heo * Returns %false if @work was already on the kernel-global workqueue and 2997d4283e93STejun Heo * %true otherwise. 299852bad64dSDavid Howells * 29990fcb78c2SRolf Eike Beer * This puts a job in the kernel-global workqueue if it was not already 30000fcb78c2SRolf Eike Beer * queued and leaves it in the same position on the kernel-global 30010fcb78c2SRolf Eike Beer * workqueue otherwise. 30021da177e4SLinus Torvalds */ 3003d4283e93STejun Heo bool schedule_work(struct work_struct *work) 30041da177e4SLinus Torvalds { 30050fcb78c2SRolf Eike Beer return queue_work(system_wq, work); 30061da177e4SLinus Torvalds } 30070fcb78c2SRolf Eike Beer EXPORT_SYMBOL(schedule_work); 30081da177e4SLinus Torvalds 30090fcb78c2SRolf Eike Beer /** 30100fcb78c2SRolf Eike Beer * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 30110fcb78c2SRolf Eike Beer * @cpu: cpu to use 30120fcb78c2SRolf Eike Beer * @dwork: job to be done 30130fcb78c2SRolf Eike Beer * @delay: number of jiffies to wait 30140fcb78c2SRolf Eike Beer * 30150fcb78c2SRolf Eike Beer * After waiting for a given time this puts a job in the kernel-global 30160fcb78c2SRolf Eike Beer * workqueue on the specified CPU. 30170fcb78c2SRolf Eike Beer */ 3018d4283e93STejun Heo bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 3019d4283e93STejun Heo unsigned long delay) 30201da177e4SLinus Torvalds { 3021d320c038STejun Heo return queue_delayed_work_on(cpu, system_wq, dwork, delay); 30221da177e4SLinus Torvalds } 3023ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on); 30241da177e4SLinus Torvalds 3025b6136773SAndrew Morton /** 30260a13c00eSTejun Heo * schedule_delayed_work - put work task in global workqueue after delay 30270a13c00eSTejun Heo * @dwork: job to be done 30280a13c00eSTejun Heo * @delay: number of jiffies to wait or 0 for immediate execution 30290a13c00eSTejun Heo * 30300a13c00eSTejun Heo * After waiting for a given time this puts a job in the kernel-global 30310a13c00eSTejun Heo * workqueue. 
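 *
 * Illustrative sketch (retry_work and retry_fn are hypothetical):
 *
 *	static DECLARE_DELAYED_WORK(retry_work, retry_fn);
 *
 *	schedule_delayed_work(&retry_work, msecs_to_jiffies(500));
 *
 * which runs retry_fn() on system_wq roughly half a second later.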
30320a13c00eSTejun Heo */ 3033d4283e93STejun Heo bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) 30340a13c00eSTejun Heo { 30350a13c00eSTejun Heo return queue_delayed_work(system_wq, dwork, delay); 30360a13c00eSTejun Heo } 30370a13c00eSTejun Heo EXPORT_SYMBOL(schedule_delayed_work); 30380a13c00eSTejun Heo 30390a13c00eSTejun Heo /** 304031ddd871STejun Heo * schedule_on_each_cpu - execute a function synchronously on each online CPU 3041b6136773SAndrew Morton * @func: the function to call 3042b6136773SAndrew Morton * 304331ddd871STejun Heo * schedule_on_each_cpu() executes @func on each online CPU using the 304431ddd871STejun Heo * system workqueue and blocks until all CPUs have completed. 3045b6136773SAndrew Morton * schedule_on_each_cpu() is very slow. 304631ddd871STejun Heo * 304731ddd871STejun Heo * RETURNS: 304831ddd871STejun Heo * 0 on success, -errno on failure. 3049b6136773SAndrew Morton */ 305065f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func) 305115316ba8SChristoph Lameter { 305215316ba8SChristoph Lameter int cpu; 305338f51568SNamhyung Kim struct work_struct __percpu *works; 305415316ba8SChristoph Lameter 3055b6136773SAndrew Morton works = alloc_percpu(struct work_struct); 3056b6136773SAndrew Morton if (!works) 305715316ba8SChristoph Lameter return -ENOMEM; 3058b6136773SAndrew Morton 305995402b38SGautham R Shenoy get_online_cpus(); 306093981800STejun Heo 306115316ba8SChristoph Lameter for_each_online_cpu(cpu) { 30629bfb1839SIngo Molnar struct work_struct *work = per_cpu_ptr(works, cpu); 30639bfb1839SIngo Molnar 30649bfb1839SIngo Molnar INIT_WORK(work, func); 30658de6d308SOleg Nesterov schedule_work_on(cpu, work); 306615316ba8SChristoph Lameter } 306793981800STejun Heo 306893981800STejun Heo for_each_online_cpu(cpu) 30698616a89aSOleg Nesterov flush_work(per_cpu_ptr(works, cpu)); 307093981800STejun Heo 307195402b38SGautham R Shenoy put_online_cpus(); 3072b6136773SAndrew Morton free_percpu(works); 307315316ba8SChristoph Lameter return 0; 307415316ba8SChristoph Lameter } 307515316ba8SChristoph Lameter 3076eef6a7d5SAlan Stern /** 3077eef6a7d5SAlan Stern * flush_scheduled_work - ensure that any scheduled work has run to completion. 3078eef6a7d5SAlan Stern * 3079eef6a7d5SAlan Stern * Forces execution of the kernel-global workqueue and blocks until its 3080eef6a7d5SAlan Stern * completion. 3081eef6a7d5SAlan Stern * 3082eef6a7d5SAlan Stern * Think twice before calling this function! It's very easy to get into 3083eef6a7d5SAlan Stern * trouble if you don't take great care. Either of the following situations 3084eef6a7d5SAlan Stern * will lead to deadlock: 3085eef6a7d5SAlan Stern * 3086eef6a7d5SAlan Stern * One of the work items currently on the workqueue needs to acquire 3087eef6a7d5SAlan Stern * a lock held by your code or its caller. 3088eef6a7d5SAlan Stern * 3089eef6a7d5SAlan Stern * Your code is running in the context of a work routine. 3090eef6a7d5SAlan Stern * 3091eef6a7d5SAlan Stern * They will be detected by lockdep when they occur, but the first might not 3092eef6a7d5SAlan Stern * occur very often. It depends on what work items are on the workqueue and 3093eef6a7d5SAlan Stern * what locks they need, which you have no control over. 3094eef6a7d5SAlan Stern * 3095eef6a7d5SAlan Stern * In most situations flushing the entire workqueue is overkill; you merely 3096eef6a7d5SAlan Stern * need to know that a particular work item isn't queued and isn't running. 
3097eef6a7d5SAlan Stern * In such cases you should use cancel_delayed_work_sync() or 3098eef6a7d5SAlan Stern * cancel_work_sync() instead. 3099eef6a7d5SAlan Stern */ 31001da177e4SLinus Torvalds void flush_scheduled_work(void) 31011da177e4SLinus Torvalds { 3102d320c038STejun Heo flush_workqueue(system_wq); 31031da177e4SLinus Torvalds } 3104ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work); 31051da177e4SLinus Torvalds 31061da177e4SLinus Torvalds /** 31071fa44ecaSJames Bottomley * execute_in_process_context - reliably execute the routine with user context 31081fa44ecaSJames Bottomley * @fn: the function to execute 31091fa44ecaSJames Bottomley * @ew: guaranteed storage for the execute work structure (must 31101fa44ecaSJames Bottomley * be available when the work executes) 31111fa44ecaSJames Bottomley * 31121fa44ecaSJames Bottomley * Executes the function immediately if process context is available, 31131fa44ecaSJames Bottomley * otherwise schedules the function for delayed execution. 31141fa44ecaSJames Bottomley * 31151fa44ecaSJames Bottomley * Returns: 0 - function was executed 31161fa44ecaSJames Bottomley * 1 - function was scheduled for execution 31171fa44ecaSJames Bottomley */ 311865f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew) 31191fa44ecaSJames Bottomley { 31201fa44ecaSJames Bottomley if (!in_interrupt()) { 312165f27f38SDavid Howells fn(&ew->work); 31221fa44ecaSJames Bottomley return 0; 31231fa44ecaSJames Bottomley } 31241fa44ecaSJames Bottomley 312565f27f38SDavid Howells INIT_WORK(&ew->work, fn); 31261fa44ecaSJames Bottomley schedule_work(&ew->work); 31271fa44ecaSJames Bottomley 31281fa44ecaSJames Bottomley return 1; 31291fa44ecaSJames Bottomley } 31301fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context); 31311fa44ecaSJames Bottomley 31321da177e4SLinus Torvalds int keventd_up(void) 31331da177e4SLinus Torvalds { 3134d320c038STejun Heo return system_wq != NULL; 31351da177e4SLinus Torvalds } 31361da177e4SLinus Torvalds 31377a4e344cSTejun Heo /** 31387a4e344cSTejun Heo * free_workqueue_attrs - free a workqueue_attrs 31397a4e344cSTejun Heo * @attrs: workqueue_attrs to free 31407a4e344cSTejun Heo * 31417a4e344cSTejun Heo * Undo alloc_workqueue_attrs(). 31427a4e344cSTejun Heo */ 31437a4e344cSTejun Heo void free_workqueue_attrs(struct workqueue_attrs *attrs) 31447a4e344cSTejun Heo { 31457a4e344cSTejun Heo if (attrs) { 31467a4e344cSTejun Heo free_cpumask_var(attrs->cpumask); 31477a4e344cSTejun Heo kfree(attrs); 31487a4e344cSTejun Heo } 31497a4e344cSTejun Heo } 31507a4e344cSTejun Heo 31517a4e344cSTejun Heo /** 31527a4e344cSTejun Heo * alloc_workqueue_attrs - allocate a workqueue_attrs 31537a4e344cSTejun Heo * @gfp_mask: allocation mask to use 31547a4e344cSTejun Heo * 31557a4e344cSTejun Heo * Allocate a new workqueue_attrs, initialize with default settings and 31567a4e344cSTejun Heo * return it. Returns NULL on failure. 
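 *
 * Illustrative sketch of pairing with free_workqueue_attrs() (the nice
 * value and cpumask below are arbitrary examples):
 *
 *	struct workqueue_attrs *attrs;
 *
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -5;
 *	cpumask_copy(attrs->cpumask, cpumask_of(0));
 *	...
 *	free_workqueue_attrs(attrs);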
31577a4e344cSTejun Heo */ 31587a4e344cSTejun Heo struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) 31597a4e344cSTejun Heo { 31607a4e344cSTejun Heo struct workqueue_attrs *attrs; 31617a4e344cSTejun Heo 31627a4e344cSTejun Heo attrs = kzalloc(sizeof(*attrs), gfp_mask); 31637a4e344cSTejun Heo if (!attrs) 31647a4e344cSTejun Heo goto fail; 31657a4e344cSTejun Heo if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) 31667a4e344cSTejun Heo goto fail; 31677a4e344cSTejun Heo 31687a4e344cSTejun Heo cpumask_setall(attrs->cpumask); 31697a4e344cSTejun Heo return attrs; 31707a4e344cSTejun Heo fail: 31717a4e344cSTejun Heo free_workqueue_attrs(attrs); 31727a4e344cSTejun Heo return NULL; 31737a4e344cSTejun Heo } 31747a4e344cSTejun Heo 317529c91e99STejun Heo static void copy_workqueue_attrs(struct workqueue_attrs *to, 317629c91e99STejun Heo const struct workqueue_attrs *from) 317729c91e99STejun Heo { 317829c91e99STejun Heo to->nice = from->nice; 317929c91e99STejun Heo cpumask_copy(to->cpumask, from->cpumask); 318029c91e99STejun Heo } 318129c91e99STejun Heo 318229c91e99STejun Heo /* 318329c91e99STejun Heo * Hacky implementation of jhash of bitmaps which only considers the 318429c91e99STejun Heo * specified number of bits. We probably want a proper implementation in 318529c91e99STejun Heo * include/linux/jhash.h. 318629c91e99STejun Heo */ 318729c91e99STejun Heo static u32 jhash_bitmap(const unsigned long *bitmap, int bits, u32 hash) 318829c91e99STejun Heo { 318929c91e99STejun Heo int nr_longs = bits / BITS_PER_LONG; 319029c91e99STejun Heo int nr_leftover = bits % BITS_PER_LONG; 319129c91e99STejun Heo unsigned long leftover = 0; 319229c91e99STejun Heo 319329c91e99STejun Heo if (nr_longs) 319429c91e99STejun Heo hash = jhash(bitmap, nr_longs * sizeof(long), hash); 319529c91e99STejun Heo if (nr_leftover) { 319629c91e99STejun Heo bitmap_copy(&leftover, bitmap + nr_longs, nr_leftover); 319729c91e99STejun Heo hash = jhash(&leftover, sizeof(long), hash); 319829c91e99STejun Heo } 319929c91e99STejun Heo return hash; 320029c91e99STejun Heo } 320129c91e99STejun Heo 320229c91e99STejun Heo /* hash value of the content of @attr */ 320329c91e99STejun Heo static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 320429c91e99STejun Heo { 320529c91e99STejun Heo u32 hash = 0; 320629c91e99STejun Heo 320729c91e99STejun Heo hash = jhash_1word(attrs->nice, hash); 320829c91e99STejun Heo hash = jhash_bitmap(cpumask_bits(attrs->cpumask), nr_cpu_ids, hash); 320929c91e99STejun Heo return hash; 321029c91e99STejun Heo } 321129c91e99STejun Heo 321229c91e99STejun Heo /* content equality test */ 321329c91e99STejun Heo static bool wqattrs_equal(const struct workqueue_attrs *a, 321429c91e99STejun Heo const struct workqueue_attrs *b) 321529c91e99STejun Heo { 321629c91e99STejun Heo if (a->nice != b->nice) 321729c91e99STejun Heo return false; 321829c91e99STejun Heo if (!cpumask_equal(a->cpumask, b->cpumask)) 321929c91e99STejun Heo return false; 322029c91e99STejun Heo return true; 322129c91e99STejun Heo } 322229c91e99STejun Heo 32237a4e344cSTejun Heo /** 32247a4e344cSTejun Heo * init_worker_pool - initialize a newly zalloc'd worker_pool 32257a4e344cSTejun Heo * @pool: worker_pool to initialize 32267a4e344cSTejun Heo * 32277a4e344cSTejun Heo * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 322829c91e99STejun Heo * Returns 0 on success, -errno on failure.
Even on failure, all fields 322929c91e99STejun Heo * inside @pool proper are initialized and put_unbound_pool() can be called 323029c91e99STejun Heo * on @pool safely to release it. 32317a4e344cSTejun Heo */ 32327a4e344cSTejun Heo static int init_worker_pool(struct worker_pool *pool) 32334e1a1f9aSTejun Heo { 32344e1a1f9aSTejun Heo spin_lock_init(&pool->lock); 323529c91e99STejun Heo pool->id = -1; 323629c91e99STejun Heo pool->cpu = -1; 32374e1a1f9aSTejun Heo pool->flags |= POOL_DISASSOCIATED; 32384e1a1f9aSTejun Heo INIT_LIST_HEAD(&pool->worklist); 32394e1a1f9aSTejun Heo INIT_LIST_HEAD(&pool->idle_list); 32404e1a1f9aSTejun Heo hash_init(pool->busy_hash); 32414e1a1f9aSTejun Heo 32424e1a1f9aSTejun Heo init_timer_deferrable(&pool->idle_timer); 32434e1a1f9aSTejun Heo pool->idle_timer.function = idle_worker_timeout; 32444e1a1f9aSTejun Heo pool->idle_timer.data = (unsigned long)pool; 32454e1a1f9aSTejun Heo 32464e1a1f9aSTejun Heo setup_timer(&pool->mayday_timer, pool_mayday_timeout, 32474e1a1f9aSTejun Heo (unsigned long)pool); 32484e1a1f9aSTejun Heo 32494e1a1f9aSTejun Heo mutex_init(&pool->manager_arb); 32504e1a1f9aSTejun Heo mutex_init(&pool->assoc_mutex); 32514e1a1f9aSTejun Heo ida_init(&pool->worker_ida); 32527a4e344cSTejun Heo 325329c91e99STejun Heo INIT_HLIST_NODE(&pool->hash_node); 325429c91e99STejun Heo pool->refcnt = 1; 325529c91e99STejun Heo 325629c91e99STejun Heo /* shouldn't fail above this point */ 32577a4e344cSTejun Heo pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); 32587a4e344cSTejun Heo if (!pool->attrs) 32597a4e344cSTejun Heo return -ENOMEM; 32607a4e344cSTejun Heo return 0; 32614e1a1f9aSTejun Heo } 32624e1a1f9aSTejun Heo 326329c91e99STejun Heo static void rcu_free_pool(struct rcu_head *rcu) 326429c91e99STejun Heo { 326529c91e99STejun Heo struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 326629c91e99STejun Heo 326729c91e99STejun Heo ida_destroy(&pool->worker_ida); 326829c91e99STejun Heo free_workqueue_attrs(pool->attrs); 326929c91e99STejun Heo kfree(pool); 327029c91e99STejun Heo } 327129c91e99STejun Heo 327229c91e99STejun Heo /** 327329c91e99STejun Heo * put_unbound_pool - put a worker_pool 327429c91e99STejun Heo * @pool: worker_pool to put 327529c91e99STejun Heo * 327629c91e99STejun Heo * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU 327729c91e99STejun Heo * safe manner. 
327829c91e99STejun Heo */ 327929c91e99STejun Heo static void put_unbound_pool(struct worker_pool *pool) 328029c91e99STejun Heo { 328129c91e99STejun Heo struct worker *worker; 328229c91e99STejun Heo 328329c91e99STejun Heo spin_lock_irq(&workqueue_lock); 328429c91e99STejun Heo if (--pool->refcnt) { 328529c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 328629c91e99STejun Heo return; 328729c91e99STejun Heo } 328829c91e99STejun Heo 328929c91e99STejun Heo /* sanity checks */ 329029c91e99STejun Heo if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) || 329129c91e99STejun Heo WARN_ON(!list_empty(&pool->worklist))) { 329229c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 329329c91e99STejun Heo return; 329429c91e99STejun Heo } 329529c91e99STejun Heo 329629c91e99STejun Heo /* release id and unhash */ 329729c91e99STejun Heo if (pool->id >= 0) 329829c91e99STejun Heo idr_remove(&worker_pool_idr, pool->id); 329929c91e99STejun Heo hash_del(&pool->hash_node); 330029c91e99STejun Heo 330129c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 330229c91e99STejun Heo 330329c91e99STejun Heo /* lock out manager and destroy all workers */ 330429c91e99STejun Heo mutex_lock(&pool->manager_arb); 330529c91e99STejun Heo spin_lock_irq(&pool->lock); 330629c91e99STejun Heo 330729c91e99STejun Heo while ((worker = first_worker(pool))) 330829c91e99STejun Heo destroy_worker(worker); 330929c91e99STejun Heo WARN_ON(pool->nr_workers || pool->nr_idle); 331029c91e99STejun Heo 331129c91e99STejun Heo spin_unlock_irq(&pool->lock); 331229c91e99STejun Heo mutex_unlock(&pool->manager_arb); 331329c91e99STejun Heo 331429c91e99STejun Heo /* shut down the timers */ 331529c91e99STejun Heo del_timer_sync(&pool->idle_timer); 331629c91e99STejun Heo del_timer_sync(&pool->mayday_timer); 331729c91e99STejun Heo 331829c91e99STejun Heo /* sched-RCU protected to allow dereferences from get_work_pool() */ 331929c91e99STejun Heo call_rcu_sched(&pool->rcu, rcu_free_pool); 332029c91e99STejun Heo } 332129c91e99STejun Heo 332229c91e99STejun Heo /** 332329c91e99STejun Heo * get_unbound_pool - get a worker_pool with the specified attributes 332429c91e99STejun Heo * @attrs: the attributes of the worker_pool to get 332529c91e99STejun Heo * 332629c91e99STejun Heo * Obtain a worker_pool which has the same attributes as @attrs, bump the 332729c91e99STejun Heo * reference count and return it. If there already is a matching 332829c91e99STejun Heo * worker_pool, it will be used; otherwise, this function attempts to 332929c91e99STejun Heo * create a new one. On failure, returns NULL. 333029c91e99STejun Heo */ 333129c91e99STejun Heo static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 333229c91e99STejun Heo { 333329c91e99STejun Heo static DEFINE_MUTEX(create_mutex); 333429c91e99STejun Heo u32 hash = wqattrs_hash(attrs); 333529c91e99STejun Heo struct worker_pool *pool; 333629c91e99STejun Heo struct worker *worker; 333729c91e99STejun Heo 333829c91e99STejun Heo mutex_lock(&create_mutex); 333929c91e99STejun Heo 334029c91e99STejun Heo /* do we already have a matching pool? 
*/ 334129c91e99STejun Heo spin_lock_irq(&workqueue_lock); 334229c91e99STejun Heo hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 334329c91e99STejun Heo if (wqattrs_equal(pool->attrs, attrs)) { 334429c91e99STejun Heo pool->refcnt++; 334529c91e99STejun Heo goto out_unlock; 334629c91e99STejun Heo } 334729c91e99STejun Heo } 334829c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 334929c91e99STejun Heo 335029c91e99STejun Heo /* nope, create a new one */ 335129c91e99STejun Heo pool = kzalloc(sizeof(*pool), GFP_KERNEL); 335229c91e99STejun Heo if (!pool || init_worker_pool(pool) < 0) 335329c91e99STejun Heo goto fail; 335429c91e99STejun Heo 33558864b4e5STejun Heo lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ 335629c91e99STejun Heo copy_workqueue_attrs(pool->attrs, attrs); 335729c91e99STejun Heo 335829c91e99STejun Heo if (worker_pool_assign_id(pool) < 0) 335929c91e99STejun Heo goto fail; 336029c91e99STejun Heo 336129c91e99STejun Heo /* create and start the initial worker */ 336229c91e99STejun Heo worker = create_worker(pool); 336329c91e99STejun Heo if (!worker) 336429c91e99STejun Heo goto fail; 336529c91e99STejun Heo 336629c91e99STejun Heo spin_lock_irq(&pool->lock); 336729c91e99STejun Heo start_worker(worker); 336829c91e99STejun Heo spin_unlock_irq(&pool->lock); 336929c91e99STejun Heo 337029c91e99STejun Heo /* install */ 337129c91e99STejun Heo spin_lock_irq(&workqueue_lock); 337229c91e99STejun Heo hash_add(unbound_pool_hash, &pool->hash_node, hash); 337329c91e99STejun Heo out_unlock: 337429c91e99STejun Heo spin_unlock_irq(&workqueue_lock); 337529c91e99STejun Heo mutex_unlock(&create_mutex); 337629c91e99STejun Heo return pool; 337729c91e99STejun Heo fail: 337829c91e99STejun Heo mutex_unlock(&create_mutex); 337929c91e99STejun Heo if (pool) 338029c91e99STejun Heo put_unbound_pool(pool); 338129c91e99STejun Heo return NULL; 338229c91e99STejun Heo } 338329c91e99STejun Heo 33848864b4e5STejun Heo static void rcu_free_pwq(struct rcu_head *rcu) 33858864b4e5STejun Heo { 33868864b4e5STejun Heo kmem_cache_free(pwq_cache, 33878864b4e5STejun Heo container_of(rcu, struct pool_workqueue, rcu)); 33888864b4e5STejun Heo } 33898864b4e5STejun Heo 33908864b4e5STejun Heo /* 33918864b4e5STejun Heo * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt 33928864b4e5STejun Heo * and needs to be destroyed. 33938864b4e5STejun Heo */ 33948864b4e5STejun Heo static void pwq_unbound_release_workfn(struct work_struct *work) 33958864b4e5STejun Heo { 33968864b4e5STejun Heo struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 33978864b4e5STejun Heo unbound_release_work); 33988864b4e5STejun Heo struct workqueue_struct *wq = pwq->wq; 33998864b4e5STejun Heo struct worker_pool *pool = pwq->pool; 34008864b4e5STejun Heo 34018864b4e5STejun Heo if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) 34028864b4e5STejun Heo return; 34038864b4e5STejun Heo 340475ccf595STejun Heo /* 340575ccf595STejun Heo * Unlink @pwq. Synchronization against flush_mutex isn't strictly 340675ccf595STejun Heo * necessary on release but do it anyway. It's easier to verify 340775ccf595STejun Heo * and consistent with the linking path. 
340875ccf595STejun Heo */ 340975ccf595STejun Heo mutex_lock(&wq->flush_mutex); 34108864b4e5STejun Heo spin_lock_irq(&workqueue_lock); 34118864b4e5STejun Heo list_del_rcu(&pwq->pwqs_node); 34128864b4e5STejun Heo spin_unlock_irq(&workqueue_lock); 341375ccf595STejun Heo mutex_unlock(&wq->flush_mutex); 34148864b4e5STejun Heo 34158864b4e5STejun Heo put_unbound_pool(pool); 34168864b4e5STejun Heo call_rcu_sched(&pwq->rcu, rcu_free_pwq); 34178864b4e5STejun Heo 34188864b4e5STejun Heo /* 34198864b4e5STejun Heo * If we're the last pwq going away, @wq is already dead and no one 34208864b4e5STejun Heo * is gonna access it anymore. Free it. 34218864b4e5STejun Heo */ 34228864b4e5STejun Heo if (list_empty(&wq->pwqs)) 34238864b4e5STejun Heo kfree(wq); 34248864b4e5STejun Heo } 34258864b4e5STejun Heo 3426d2c1d404STejun Heo static void init_and_link_pwq(struct pool_workqueue *pwq, 3427d2c1d404STejun Heo struct workqueue_struct *wq, 3428d2c1d404STejun Heo struct worker_pool *pool) 3429d2c1d404STejun Heo { 3430d2c1d404STejun Heo BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 3431d2c1d404STejun Heo 3432d2c1d404STejun Heo pwq->pool = pool; 3433d2c1d404STejun Heo pwq->wq = wq; 3434d2c1d404STejun Heo pwq->flush_color = -1; 34358864b4e5STejun Heo pwq->refcnt = 1; 3436d2c1d404STejun Heo pwq->max_active = wq->saved_max_active; 3437d2c1d404STejun Heo INIT_LIST_HEAD(&pwq->delayed_works); 3438d2c1d404STejun Heo INIT_LIST_HEAD(&pwq->mayday_node); 34398864b4e5STejun Heo INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); 3440d2c1d404STejun Heo 344175ccf595STejun Heo /* 344275ccf595STejun Heo * Link @pwq and set the matching work_color. This is synchronized 344375ccf595STejun Heo * with flush_mutex to avoid confusing flush_workqueue(). 344475ccf595STejun Heo */ 344575ccf595STejun Heo mutex_lock(&wq->flush_mutex); 344675ccf595STejun Heo spin_lock_irq(&workqueue_lock); 344775ccf595STejun Heo 344875ccf595STejun Heo pwq->work_color = wq->work_color; 3449d2c1d404STejun Heo list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs); 345075ccf595STejun Heo 345175ccf595STejun Heo spin_unlock_irq(&workqueue_lock); 345275ccf595STejun Heo mutex_unlock(&wq->flush_mutex); 3453d2c1d404STejun Heo } 3454d2c1d404STejun Heo 345530cdf249STejun Heo static int alloc_and_link_pwqs(struct workqueue_struct *wq) 34561da177e4SLinus Torvalds { 345749e3cf44STejun Heo bool highpri = wq->flags & WQ_HIGHPRI; 345830cdf249STejun Heo int cpu; 3459e1d8aa9fSFrederic Weisbecker 346030cdf249STejun Heo if (!(wq->flags & WQ_UNBOUND)) { 3461420c0ddbSTejun Heo wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); 3462420c0ddbSTejun Heo if (!wq->cpu_pwqs) 346330cdf249STejun Heo return -ENOMEM; 346430cdf249STejun Heo 346530cdf249STejun Heo for_each_possible_cpu(cpu) { 34667fb98ea7STejun Heo struct pool_workqueue *pwq = 34677fb98ea7STejun Heo per_cpu_ptr(wq->cpu_pwqs, cpu); 34687a62c2c8STejun Heo struct worker_pool *cpu_pools = 3469f02ae73aSTejun Heo per_cpu(cpu_worker_pools, cpu); 347030cdf249STejun Heo 3471d2c1d404STejun Heo init_and_link_pwq(pwq, wq, &cpu_pools[highpri]); 347230cdf249STejun Heo } 347330cdf249STejun Heo } else { 347430cdf249STejun Heo struct pool_workqueue *pwq; 3475d2c1d404STejun Heo struct worker_pool *pool; 347630cdf249STejun Heo 347730cdf249STejun Heo pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL); 347830cdf249STejun Heo if (!pwq) 347930cdf249STejun Heo return -ENOMEM; 348030cdf249STejun Heo 3481d2c1d404STejun Heo pool = get_unbound_pool(unbound_std_wq_attrs[highpri]); 3482d2c1d404STejun Heo if (!pool) { 348329c91e99STejun Heo 
kmem_cache_free(pwq_cache, pwq); 348429c91e99STejun Heo return -ENOMEM; 348529c91e99STejun Heo } 348629c91e99STejun Heo 3487d2c1d404STejun Heo init_and_link_pwq(pwq, wq, pool); 348830cdf249STejun Heo } 348930cdf249STejun Heo 349030cdf249STejun Heo return 0; 34910f900049STejun Heo } 34920f900049STejun Heo 3493f3421797STejun Heo static int wq_clamp_max_active(int max_active, unsigned int flags, 3494f3421797STejun Heo const char *name) 3495b71ab8c2STejun Heo { 3496f3421797STejun Heo int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 3497f3421797STejun Heo 3498f3421797STejun Heo if (max_active < 1 || max_active > lim) 3499044c782cSValentin Ilie pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 3500f3421797STejun Heo max_active, name, 1, lim); 3501b71ab8c2STejun Heo 3502f3421797STejun Heo return clamp_val(max_active, 1, lim); 3503b71ab8c2STejun Heo } 3504b71ab8c2STejun Heo 3505b196be89STejun Heo struct workqueue_struct *__alloc_workqueue_key(const char *fmt, 350697e37d7bSTejun Heo unsigned int flags, 35071e19ffc6STejun Heo int max_active, 3508eb13ba87SJohannes Berg struct lock_class_key *key, 3509b196be89STejun Heo const char *lock_name, ...) 35103af24433SOleg Nesterov { 3511b196be89STejun Heo va_list args, args1; 35123af24433SOleg Nesterov struct workqueue_struct *wq; 351349e3cf44STejun Heo struct pool_workqueue *pwq; 3514b196be89STejun Heo size_t namelen; 3515b196be89STejun Heo 3516b196be89STejun Heo /* determine namelen, allocate wq and format name */ 3517b196be89STejun Heo va_start(args, lock_name); 3518b196be89STejun Heo va_copy(args1, args); 3519b196be89STejun Heo namelen = vsnprintf(NULL, 0, fmt, args) + 1; 3520b196be89STejun Heo 3521b196be89STejun Heo wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL); 3522b196be89STejun Heo if (!wq) 3523d2c1d404STejun Heo return NULL; 3524b196be89STejun Heo 3525b196be89STejun Heo vsnprintf(wq->name, namelen, fmt, args1); 3526b196be89STejun Heo va_end(args); 3527b196be89STejun Heo va_end(args1); 35283af24433SOleg Nesterov 3529d320c038STejun Heo max_active = max_active ?: WQ_DFL_ACTIVE; 3530b196be89STejun Heo max_active = wq_clamp_max_active(max_active, flags, wq->name); 35313af24433SOleg Nesterov 3532b196be89STejun Heo /* init wq */ 353397e37d7bSTejun Heo wq->flags = flags; 3534a0a1a5fdSTejun Heo wq->saved_max_active = max_active; 353573f53c4aSTejun Heo mutex_init(&wq->flush_mutex); 3536112202d9STejun Heo atomic_set(&wq->nr_pwqs_to_flush, 0); 353730cdf249STejun Heo INIT_LIST_HEAD(&wq->pwqs); 353873f53c4aSTejun Heo INIT_LIST_HEAD(&wq->flusher_queue); 353973f53c4aSTejun Heo INIT_LIST_HEAD(&wq->flusher_overflow); 3540493a1724STejun Heo INIT_LIST_HEAD(&wq->maydays); 35413af24433SOleg Nesterov 3542eb13ba87SJohannes Berg lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 3543cce1a165SOleg Nesterov INIT_LIST_HEAD(&wq->list); 35443af24433SOleg Nesterov 354530cdf249STejun Heo if (alloc_and_link_pwqs(wq) < 0) 3546d2c1d404STejun Heo goto err_free_wq; 35471537663fSTejun Heo 3548493008a8STejun Heo /* 3549493008a8STejun Heo * Workqueues which may be used during memory reclaim should 3550493008a8STejun Heo * have a rescuer to guarantee forward progress. 
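 *
 * For example (hypothetical caller, not part of this file), a driver
 * whose writeback path must make progress under memory pressure would
 * ask for a rescuer like this:
 *
 *	wq = alloc_workqueue("mydrv_io", WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;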
3551493008a8STejun Heo */ 3552493008a8STejun Heo if (flags & WQ_MEM_RECLAIM) { 3553e22bee78STejun Heo struct worker *rescuer; 3554e22bee78STejun Heo 3555d2c1d404STejun Heo rescuer = alloc_worker(); 3556e22bee78STejun Heo if (!rescuer) 3557d2c1d404STejun Heo goto err_destroy; 3558e22bee78STejun Heo 3559111c225aSTejun Heo rescuer->rescue_wq = wq; 3560111c225aSTejun Heo rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", 3561b196be89STejun Heo wq->name); 3562d2c1d404STejun Heo if (IS_ERR(rescuer->task)) { 3563d2c1d404STejun Heo kfree(rescuer); 3564d2c1d404STejun Heo goto err_destroy; 3565d2c1d404STejun Heo } 3566e22bee78STejun Heo 3567d2c1d404STejun Heo wq->rescuer = rescuer; 3568e22bee78STejun Heo rescuer->task->flags |= PF_THREAD_BOUND; 3569e22bee78STejun Heo wake_up_process(rescuer->task); 35703af24433SOleg Nesterov } 35711537663fSTejun Heo 35723af24433SOleg Nesterov /* 3573a0a1a5fdSTejun Heo * workqueue_lock protects global freeze state and workqueues 3574a0a1a5fdSTejun Heo * list. Grab it, set max_active accordingly and add the new 3575a0a1a5fdSTejun Heo * workqueue to workqueues list. 35763af24433SOleg Nesterov */ 3577e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 3578a0a1a5fdSTejun Heo 357958a69cb4STejun Heo if (workqueue_freezing && wq->flags & WQ_FREEZABLE) 358049e3cf44STejun Heo for_each_pwq(pwq, wq) 358149e3cf44STejun Heo pwq->max_active = 0; 3582a0a1a5fdSTejun Heo 35833af24433SOleg Nesterov list_add(&wq->list, &workqueues); 3584a0a1a5fdSTejun Heo 3585e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 35863af24433SOleg Nesterov 35873af24433SOleg Nesterov return wq; 3588d2c1d404STejun Heo 3589d2c1d404STejun Heo err_free_wq: 35904690c4abSTejun Heo kfree(wq); 3591d2c1d404STejun Heo return NULL; 3592d2c1d404STejun Heo err_destroy: 3593d2c1d404STejun Heo destroy_workqueue(wq); 35944690c4abSTejun Heo return NULL; 35951da177e4SLinus Torvalds } 3596d320c038STejun Heo EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 35971da177e4SLinus Torvalds 35983af24433SOleg Nesterov /** 35993af24433SOleg Nesterov * destroy_workqueue - safely terminate a workqueue 36003af24433SOleg Nesterov * @wq: target workqueue 36013af24433SOleg Nesterov * 36023af24433SOleg Nesterov * Safely destroy a workqueue. All work currently pending will be done first. 
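 *
 * Typical pairing (illustrative, not from this file; my_work is a
 * hypothetical work item):
 *
 *	queue_work(wq, &my_work);
 *	...
 *	destroy_workqueue(wq);
 *
 * Because the workqueue is drained first, &my_work has finished by the
 * time destroy_workqueue() returns.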
36033af24433SOleg Nesterov */ 36043af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq) 36053af24433SOleg Nesterov { 360649e3cf44STejun Heo struct pool_workqueue *pwq; 36073af24433SOleg Nesterov 36089c5a2ba7STejun Heo /* drain it before proceeding with destruction */ 36099c5a2ba7STejun Heo drain_workqueue(wq); 3610c8efcc25STejun Heo 361176af4d93STejun Heo spin_lock_irq(&workqueue_lock); 361276af4d93STejun Heo 36136183c009STejun Heo /* sanity checks */ 361449e3cf44STejun Heo for_each_pwq(pwq, wq) { 36156183c009STejun Heo int i; 36166183c009STejun Heo 361776af4d93STejun Heo for (i = 0; i < WORK_NR_COLORS; i++) { 361876af4d93STejun Heo if (WARN_ON(pwq->nr_in_flight[i])) { 361976af4d93STejun Heo spin_unlock_irq(&workqueue_lock); 36206183c009STejun Heo return; 362176af4d93STejun Heo } 362276af4d93STejun Heo } 362376af4d93STejun Heo 36248864b4e5STejun Heo if (WARN_ON(pwq->refcnt > 1) || 36258864b4e5STejun Heo WARN_ON(pwq->nr_active) || 362676af4d93STejun Heo WARN_ON(!list_empty(&pwq->delayed_works))) { 362776af4d93STejun Heo spin_unlock_irq(&workqueue_lock); 36286183c009STejun Heo return; 36296183c009STejun Heo } 363076af4d93STejun Heo } 36316183c009STejun Heo 3632a0a1a5fdSTejun Heo /* 3633a0a1a5fdSTejun Heo * wq list is used to freeze wq, remove from list after 3634a0a1a5fdSTejun Heo * flushing is complete in case freeze races us. 3635a0a1a5fdSTejun Heo */ 3636d2c1d404STejun Heo list_del_init(&wq->list); 363776af4d93STejun Heo 3638e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 36393af24433SOleg Nesterov 3640493008a8STejun Heo if (wq->rescuer) { 3641e22bee78STejun Heo kthread_stop(wq->rescuer->task); 36428d9df9f0SXiaotian Feng kfree(wq->rescuer); 3643493008a8STejun Heo wq->rescuer = NULL; 3644e22bee78STejun Heo } 3645e22bee78STejun Heo 36468864b4e5STejun Heo if (!(wq->flags & WQ_UNBOUND)) { 364729c91e99STejun Heo /* 36488864b4e5STejun Heo * The base ref is never dropped on per-cpu pwqs. Directly 36498864b4e5STejun Heo * free the pwqs and wq. 365029c91e99STejun Heo */ 36518864b4e5STejun Heo free_percpu(wq->cpu_pwqs); 36528864b4e5STejun Heo kfree(wq); 36538864b4e5STejun Heo } else { 36548864b4e5STejun Heo /* 36558864b4e5STejun Heo * We're the sole accessor of @wq at this point. Directly 36568864b4e5STejun Heo * access the first pwq and put the base ref. As both pwqs 36578864b4e5STejun Heo * and pools are sched-RCU protected, the lock operations 36588864b4e5STejun Heo * are safe. @wq will be freed when the last pwq is 36598864b4e5STejun Heo * released. 36608864b4e5STejun Heo */ 366129c91e99STejun Heo pwq = list_first_entry(&wq->pwqs, struct pool_workqueue, 366229c91e99STejun Heo pwqs_node); 36638864b4e5STejun Heo spin_lock_irq(&pwq->pool->lock); 36648864b4e5STejun Heo put_pwq(pwq); 36658864b4e5STejun Heo spin_unlock_irq(&pwq->pool->lock); 366629c91e99STejun Heo } 36673af24433SOleg Nesterov } 36683af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue); 36693af24433SOleg Nesterov 3670dcd989cbSTejun Heo /** 3671112202d9STejun Heo * pwq_set_max_active - adjust max_active of a pwq 3672112202d9STejun Heo * @pwq: target pool_workqueue 36739f4bd4cdSLai Jiangshan * @max_active: new max_active value. 36749f4bd4cdSLai Jiangshan * 3675112202d9STejun Heo * Set @pwq->max_active to @max_active and activate delayed works if 36769f4bd4cdSLai Jiangshan * increased. 36779f4bd4cdSLai Jiangshan * 36789f4bd4cdSLai Jiangshan * CONTEXT: 3679d565ed63STejun Heo * spin_lock_irq(pool->lock). 
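 *
 * (Illustrative note: with @max_active == 1 at most one work item from
 * this pwq executes at a time; further items sit on pwq->delayed_works
 * until the active one retires.)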
36809f4bd4cdSLai Jiangshan */ 3681112202d9STejun Heo static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active) 36829f4bd4cdSLai Jiangshan { 3683112202d9STejun Heo pwq->max_active = max_active; 36849f4bd4cdSLai Jiangshan 3685112202d9STejun Heo while (!list_empty(&pwq->delayed_works) && 3686112202d9STejun Heo pwq->nr_active < pwq->max_active) 3687112202d9STejun Heo pwq_activate_first_delayed(pwq); 36889f4bd4cdSLai Jiangshan } 36899f4bd4cdSLai Jiangshan 36909f4bd4cdSLai Jiangshan /** 3691dcd989cbSTejun Heo * workqueue_set_max_active - adjust max_active of a workqueue 3692dcd989cbSTejun Heo * @wq: target workqueue 3693dcd989cbSTejun Heo * @max_active: new max_active value. 3694dcd989cbSTejun Heo * 3695dcd989cbSTejun Heo * Set max_active of @wq to @max_active. 3696dcd989cbSTejun Heo * 3697dcd989cbSTejun Heo * CONTEXT: 3698dcd989cbSTejun Heo * Don't call from IRQ context. 3699dcd989cbSTejun Heo */ 3700dcd989cbSTejun Heo void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 3701dcd989cbSTejun Heo { 370249e3cf44STejun Heo struct pool_workqueue *pwq; 3703dcd989cbSTejun Heo 3704f3421797STejun Heo max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 3705dcd989cbSTejun Heo 3706e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 3707dcd989cbSTejun Heo 3708dcd989cbSTejun Heo wq->saved_max_active = max_active; 3709dcd989cbSTejun Heo 371049e3cf44STejun Heo for_each_pwq(pwq, wq) { 3711112202d9STejun Heo struct worker_pool *pool = pwq->pool; 3712dcd989cbSTejun Heo 3713e98d5b16STejun Heo spin_lock(&pool->lock); 3714dcd989cbSTejun Heo 371558a69cb4STejun Heo if (!(wq->flags & WQ_FREEZABLE) || 371635b6bb63STejun Heo !(pool->flags & POOL_FREEZING)) 3717112202d9STejun Heo pwq_set_max_active(pwq, max_active); 3718dcd989cbSTejun Heo 3719e98d5b16STejun Heo spin_unlock(&pool->lock); 3720dcd989cbSTejun Heo } 3721dcd989cbSTejun Heo 3722e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 3723dcd989cbSTejun Heo } 3724dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_set_max_active); 3725dcd989cbSTejun Heo 3726dcd989cbSTejun Heo /** 3727dcd989cbSTejun Heo * workqueue_congested - test whether a workqueue is congested 3728dcd989cbSTejun Heo * @cpu: CPU in question 3729dcd989cbSTejun Heo * @wq: target workqueue 3730dcd989cbSTejun Heo * 3731dcd989cbSTejun Heo * Test whether @wq's cpu workqueue for @cpu is congested. There is 3732dcd989cbSTejun Heo * no synchronization around this function and the test result is 3733dcd989cbSTejun Heo * unreliable and only useful as advisory hints or for debugging. 3734dcd989cbSTejun Heo * 3735dcd989cbSTejun Heo * RETURNS: 3736dcd989cbSTejun Heo * %true if congested, %false otherwise. 
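 *
 * Illustrative use (hypothetical caller and helpers, not from this
 * file) -- back off instead of piling onto a congested per-cpu queue:
 *
 *	if (!workqueue_congested(cpu, my_wq))
 *		queue_work_on(cpu, my_wq, &my_work);
 *	else
 *		defer_my_work();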
3737dcd989cbSTejun Heo */ 3738d84ff051STejun Heo bool workqueue_congested(int cpu, struct workqueue_struct *wq) 3739dcd989cbSTejun Heo { 37407fb98ea7STejun Heo struct pool_workqueue *pwq; 374176af4d93STejun Heo bool ret; 374276af4d93STejun Heo 374376af4d93STejun Heo preempt_disable(); 37447fb98ea7STejun Heo 37457fb98ea7STejun Heo if (!(wq->flags & WQ_UNBOUND)) 37467fb98ea7STejun Heo pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 37477fb98ea7STejun Heo else 37487fb98ea7STejun Heo pwq = first_pwq(wq); 3749dcd989cbSTejun Heo 375076af4d93STejun Heo ret = !list_empty(&pwq->delayed_works); 375176af4d93STejun Heo preempt_enable(); 375276af4d93STejun Heo 375376af4d93STejun Heo return ret; 3754dcd989cbSTejun Heo } 3755dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_congested); 3756dcd989cbSTejun Heo 3757dcd989cbSTejun Heo /** 3758dcd989cbSTejun Heo * work_busy - test whether a work is currently pending or running 3759dcd989cbSTejun Heo * @work: the work to be tested 3760dcd989cbSTejun Heo * 3761dcd989cbSTejun Heo * Test whether @work is currently pending or running. There is no 3762dcd989cbSTejun Heo * synchronization around this function and the test result is 3763dcd989cbSTejun Heo * unreliable and only useful as advisory hints or for debugging. 3764dcd989cbSTejun Heo * 3765dcd989cbSTejun Heo * RETURNS: 3766dcd989cbSTejun Heo * OR'd bitmask of WORK_BUSY_* bits. 3767dcd989cbSTejun Heo */ 3768dcd989cbSTejun Heo unsigned int work_busy(struct work_struct *work) 3769dcd989cbSTejun Heo { 3770fa1b54e6STejun Heo struct worker_pool *pool; 3771dcd989cbSTejun Heo unsigned long flags; 3772dcd989cbSTejun Heo unsigned int ret = 0; 3773dcd989cbSTejun Heo 3774dcd989cbSTejun Heo if (work_pending(work)) 3775dcd989cbSTejun Heo ret |= WORK_BUSY_PENDING; 3776038366c5SLai Jiangshan 3777fa1b54e6STejun Heo local_irq_save(flags); 3778fa1b54e6STejun Heo pool = get_work_pool(work); 3779038366c5SLai Jiangshan if (pool) { 3780fa1b54e6STejun Heo spin_lock(&pool->lock); 3781c9e7cf27STejun Heo if (find_worker_executing_work(pool, work)) 3782dcd989cbSTejun Heo ret |= WORK_BUSY_RUNNING; 3783fa1b54e6STejun Heo spin_unlock(&pool->lock); 3784038366c5SLai Jiangshan } 3785fa1b54e6STejun Heo local_irq_restore(flags); 3786dcd989cbSTejun Heo 3787dcd989cbSTejun Heo return ret; 3788dcd989cbSTejun Heo } 3789dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_busy); 3790dcd989cbSTejun Heo 3791db7bccf4STejun Heo /* 3792db7bccf4STejun Heo * CPU hotplug. 3793db7bccf4STejun Heo * 3794e22bee78STejun Heo * There are two challenges in supporting CPU hotplug. Firstly, there 3795112202d9STejun Heo * are a lot of assumptions on strong associations among work, pwq and 3796706026c2STejun Heo * pool which make migrating pending and scheduled works very 3797e22bee78STejun Heo * difficult to implement without impacting hot paths. Secondly, 379894cf58bbSTejun Heo * worker pools serve mix of short, long and very long running works making 3799e22bee78STejun Heo * blocked draining impractical. 3800e22bee78STejun Heo * 380124647570STejun Heo * This is solved by allowing the pools to be disassociated from the CPU 3802628c78e7STejun Heo * running as an unbound one and allowing it to be reattached later if the 3803628c78e7STejun Heo * cpu comes back online. 
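 *
 * (Added summary of the sequence implemented below, paraphrasing the
 * code rather than describing a new interface:
 *
 *	CPU_DOWN_PREPARE -> wq_unbind_fn(): workers gain WORKER_UNBOUND,
 *			    the pool gains POOL_DISASSOCIATED and
 *			    nr_running is zapped
 *	CPU_ONLINE	 -> POOL_DISASSOCIATED is cleared and the workers
 *			    are rebound via rebind_workers())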
3804db7bccf4STejun Heo */ 3805db7bccf4STejun Heo 3806706026c2STejun Heo static void wq_unbind_fn(struct work_struct *work) 3807db7bccf4STejun Heo { 380838db41d9STejun Heo int cpu = smp_processor_id(); 38094ce62e9eSTejun Heo struct worker_pool *pool; 3810db7bccf4STejun Heo struct worker *worker; 3811db7bccf4STejun Heo int i; 3812db7bccf4STejun Heo 3813f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 38146183c009STejun Heo WARN_ON_ONCE(cpu != smp_processor_id()); 3815db7bccf4STejun Heo 381694cf58bbSTejun Heo mutex_lock(&pool->assoc_mutex); 381794cf58bbSTejun Heo spin_lock_irq(&pool->lock); 3818e22bee78STejun Heo 3819f2d5a0eeSTejun Heo /* 382094cf58bbSTejun Heo * We've claimed all manager positions. Make all workers 382194cf58bbSTejun Heo * unbound and set DISASSOCIATED. Before this, all workers 382294cf58bbSTejun Heo * except for the ones which are still executing works from 382394cf58bbSTejun Heo * before the last CPU down must be on the cpu. After 382494cf58bbSTejun Heo * this, they may become diasporas. 3825f2d5a0eeSTejun Heo */ 38264ce62e9eSTejun Heo list_for_each_entry(worker, &pool->idle_list, entry) 3827403c821dSTejun Heo worker->flags |= WORKER_UNBOUND; 3828db7bccf4STejun Heo 3829b67bfe0dSSasha Levin for_each_busy_worker(worker, i, pool) 3830403c821dSTejun Heo worker->flags |= WORKER_UNBOUND; 3831db7bccf4STejun Heo 383224647570STejun Heo pool->flags |= POOL_DISASSOCIATED; 3833f2d5a0eeSTejun Heo 383494cf58bbSTejun Heo spin_unlock_irq(&pool->lock); 383594cf58bbSTejun Heo mutex_unlock(&pool->assoc_mutex); 383694cf58bbSTejun Heo } 3837e22bee78STejun Heo 3838e22bee78STejun Heo /* 3839628c78e7STejun Heo * Call schedule() so that we cross rq->lock and thus can guarantee 3840628c78e7STejun Heo * sched callbacks see the %WORKER_UNBOUND flag. This is necessary 3841628c78e7STejun Heo * as scheduler callbacks may be invoked from other cpus. 3842628c78e7STejun Heo */ 3843628c78e7STejun Heo schedule(); 3844628c78e7STejun Heo 3845628c78e7STejun Heo /* 3846628c78e7STejun Heo * Sched callbacks are disabled now. Zap nr_running. After this, 3847628c78e7STejun Heo * nr_running stays zero and need_more_worker() and keep_working() 384838db41d9STejun Heo * are always true as long as the worklist is not empty. Pools on 384938db41d9STejun Heo * @cpu now behave as unbound (in terms of concurrency management) 385038db41d9STejun Heo * pools which are served by workers tied to the CPU. 3851628c78e7STejun Heo * 3852628c78e7STejun Heo * On return from this function, the current worker would trigger 3853628c78e7STejun Heo * unbound chain execution of pending work items if other workers 3854628c78e7STejun Heo * didn't already. 3855e22bee78STejun Heo */ 3856f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) 3857e19e397aSTejun Heo atomic_set(&pool->nr_running, 0); 3858db7bccf4STejun Heo } 3859db7bccf4STejun Heo 38608db25e78STejun Heo /* 38618db25e78STejun Heo * Workqueues should be brought up before normal priority CPU notifiers. 38628db25e78STejun Heo * This will be registered high priority CPU notifier. 
38638db25e78STejun Heo */ 38649fdf9b73SLai Jiangshan static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, 38651da177e4SLinus Torvalds unsigned long action, 38661da177e4SLinus Torvalds void *hcpu) 38671da177e4SLinus Torvalds { 3868d84ff051STejun Heo int cpu = (unsigned long)hcpu; 38694ce62e9eSTejun Heo struct worker_pool *pool; 38701da177e4SLinus Torvalds 38718db25e78STejun Heo switch (action & ~CPU_TASKS_FROZEN) { 38723af24433SOleg Nesterov case CPU_UP_PREPARE: 3873f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 38743ce63377STejun Heo struct worker *worker; 38753ce63377STejun Heo 38763ce63377STejun Heo if (pool->nr_workers) 38773ce63377STejun Heo continue; 38783ce63377STejun Heo 38793ce63377STejun Heo worker = create_worker(pool); 38803ce63377STejun Heo if (!worker) 38813ce63377STejun Heo return NOTIFY_BAD; 38823ce63377STejun Heo 3883d565ed63STejun Heo spin_lock_irq(&pool->lock); 38843ce63377STejun Heo start_worker(worker); 3885d565ed63STejun Heo spin_unlock_irq(&pool->lock); 38863af24433SOleg Nesterov } 38871da177e4SLinus Torvalds break; 38881da177e4SLinus Torvalds 388965758202STejun Heo case CPU_DOWN_FAILED: 389065758202STejun Heo case CPU_ONLINE: 3891f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 389294cf58bbSTejun Heo mutex_lock(&pool->assoc_mutex); 389394cf58bbSTejun Heo spin_lock_irq(&pool->lock); 389494cf58bbSTejun Heo 389524647570STejun Heo pool->flags &= ~POOL_DISASSOCIATED; 389694cf58bbSTejun Heo rebind_workers(pool); 389794cf58bbSTejun Heo 389894cf58bbSTejun Heo spin_unlock_irq(&pool->lock); 389994cf58bbSTejun Heo mutex_unlock(&pool->assoc_mutex); 390094cf58bbSTejun Heo } 39018db25e78STejun Heo break; 390265758202STejun Heo } 390365758202STejun Heo return NOTIFY_OK; 390465758202STejun Heo } 390565758202STejun Heo 390665758202STejun Heo /* 390765758202STejun Heo * Workqueues should be brought down after normal priority CPU notifiers. 390865758202STejun Heo * This will be registered as low priority CPU notifier. 
390965758202STejun Heo */ 39109fdf9b73SLai Jiangshan static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, 391165758202STejun Heo unsigned long action, 391265758202STejun Heo void *hcpu) 391365758202STejun Heo { 3914d84ff051STejun Heo int cpu = (unsigned long)hcpu; 39158db25e78STejun Heo struct work_struct unbind_work; 39168db25e78STejun Heo 391765758202STejun Heo switch (action & ~CPU_TASKS_FROZEN) { 391865758202STejun Heo case CPU_DOWN_PREPARE: 39198db25e78STejun Heo /* unbinding should happen on the local CPU */ 3920706026c2STejun Heo INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 39217635d2fdSJoonsoo Kim queue_work_on(cpu, system_highpri_wq, &unbind_work); 39228db25e78STejun Heo flush_work(&unbind_work); 39238db25e78STejun Heo break; 392465758202STejun Heo } 392565758202STejun Heo return NOTIFY_OK; 392665758202STejun Heo } 392765758202STejun Heo 39282d3854a3SRusty Russell #ifdef CONFIG_SMP 39298ccad40dSRusty Russell 39302d3854a3SRusty Russell struct work_for_cpu { 3931ed48ece2STejun Heo struct work_struct work; 39322d3854a3SRusty Russell long (*fn)(void *); 39332d3854a3SRusty Russell void *arg; 39342d3854a3SRusty Russell long ret; 39352d3854a3SRusty Russell }; 39362d3854a3SRusty Russell 3937ed48ece2STejun Heo static void work_for_cpu_fn(struct work_struct *work) 39382d3854a3SRusty Russell { 3939ed48ece2STejun Heo struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 3940ed48ece2STejun Heo 39412d3854a3SRusty Russell wfc->ret = wfc->fn(wfc->arg); 39422d3854a3SRusty Russell } 39432d3854a3SRusty Russell 39442d3854a3SRusty Russell /** 39452d3854a3SRusty Russell * work_on_cpu - run a function in user context on a particular cpu 39462d3854a3SRusty Russell * @cpu: the cpu to run on 39472d3854a3SRusty Russell * @fn: the function to run 39482d3854a3SRusty Russell * @arg: the function arg 39492d3854a3SRusty Russell * 395031ad9081SRusty Russell * This will return the value @fn returns. 395131ad9081SRusty Russell * It is up to the caller to ensure that the cpu doesn't go offline. 39526b44003eSAndrew Morton * The caller must not hold any locks which would prevent @fn from completing. 39532d3854a3SRusty Russell */ 3954d84ff051STejun Heo long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 39552d3854a3SRusty Russell { 3956ed48ece2STejun Heo struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 39572d3854a3SRusty Russell 3958ed48ece2STejun Heo INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 3959ed48ece2STejun Heo schedule_work_on(cpu, &wfc.work); 3960ed48ece2STejun Heo flush_work(&wfc.work); 39612d3854a3SRusty Russell return wfc.ret; 39622d3854a3SRusty Russell } 39632d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu); 39642d3854a3SRusty Russell #endif /* CONFIG_SMP */ 39652d3854a3SRusty Russell 3966a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER 3967e7577c50SRusty Russell 3968a0a1a5fdSTejun Heo /** 3969a0a1a5fdSTejun Heo * freeze_workqueues_begin - begin freezing workqueues 3970a0a1a5fdSTejun Heo * 397158a69cb4STejun Heo * Start freezing workqueues. After this function returns, all freezable 397258a69cb4STejun Heo * workqueues will queue new works to their frozen_works list instead of 3973706026c2STejun Heo * pool->worklist. 3974a0a1a5fdSTejun Heo * 3975a0a1a5fdSTejun Heo * CONTEXT: 3976d565ed63STejun Heo * Grabs and releases workqueue_lock and pool->lock's. 
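 *
 * Illustrative caller sequence (the PM freezer, not this file, drives
 * these; the polling loop below is a sketch):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	... system is suspended ...
 *	thaw_workqueues();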
3977a0a1a5fdSTejun Heo */ 3978a0a1a5fdSTejun Heo void freeze_workqueues_begin(void) 3979a0a1a5fdSTejun Heo { 398017116969STejun Heo struct worker_pool *pool; 398124b8a847STejun Heo struct workqueue_struct *wq; 398224b8a847STejun Heo struct pool_workqueue *pwq; 398317116969STejun Heo int id; 3984a0a1a5fdSTejun Heo 3985e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 3986a0a1a5fdSTejun Heo 39876183c009STejun Heo WARN_ON_ONCE(workqueue_freezing); 3988a0a1a5fdSTejun Heo workqueue_freezing = true; 3989a0a1a5fdSTejun Heo 399024b8a847STejun Heo /* set FREEZING */ 399117116969STejun Heo for_each_pool(pool, id) { 3992e98d5b16STejun Heo spin_lock(&pool->lock); 399335b6bb63STejun Heo WARN_ON_ONCE(pool->flags & POOL_FREEZING); 399435b6bb63STejun Heo pool->flags |= POOL_FREEZING; 399524b8a847STejun Heo spin_unlock(&pool->lock); 39961da177e4SLinus Torvalds } 39978b03ae3cSTejun Heo 399824b8a847STejun Heo /* suppress further executions by setting max_active to zero */ 399924b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 400024b8a847STejun Heo if (!(wq->flags & WQ_FREEZABLE)) 400124b8a847STejun Heo continue; 400224b8a847STejun Heo 400324b8a847STejun Heo for_each_pwq(pwq, wq) { 400424b8a847STejun Heo spin_lock(&pwq->pool->lock); 400524b8a847STejun Heo pwq->max_active = 0; 400624b8a847STejun Heo spin_unlock(&pwq->pool->lock); 400724b8a847STejun Heo } 4008a1056305STejun Heo } 4009a0a1a5fdSTejun Heo 4010e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 4011a0a1a5fdSTejun Heo } 4012a0a1a5fdSTejun Heo 4013a0a1a5fdSTejun Heo /** 401458a69cb4STejun Heo * freeze_workqueues_busy - are freezable workqueues still busy? 4015a0a1a5fdSTejun Heo * 4016a0a1a5fdSTejun Heo * Check whether freezing is complete. This function must be called 4017a0a1a5fdSTejun Heo * between freeze_workqueues_begin() and thaw_workqueues(). 4018a0a1a5fdSTejun Heo * 4019a0a1a5fdSTejun Heo * CONTEXT: 4020a0a1a5fdSTejun Heo * Grabs and releases workqueue_lock. 4021a0a1a5fdSTejun Heo * 4022a0a1a5fdSTejun Heo * RETURNS: 402358a69cb4STejun Heo * %true if some freezable workqueues are still busy. %false if freezing 402458a69cb4STejun Heo * is complete. 4025a0a1a5fdSTejun Heo */ 4026a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void) 4027a0a1a5fdSTejun Heo { 4028a0a1a5fdSTejun Heo bool busy = false; 402924b8a847STejun Heo struct workqueue_struct *wq; 403024b8a847STejun Heo struct pool_workqueue *pwq; 4031a0a1a5fdSTejun Heo 4032e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 4033a0a1a5fdSTejun Heo 40346183c009STejun Heo WARN_ON_ONCE(!workqueue_freezing); 4035a0a1a5fdSTejun Heo 403624b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 403724b8a847STejun Heo if (!(wq->flags & WQ_FREEZABLE)) 403824b8a847STejun Heo continue; 4039a0a1a5fdSTejun Heo /* 4040a0a1a5fdSTejun Heo * nr_active is monotonically decreasing. It's safe 4041a0a1a5fdSTejun Heo * to peek without lock. 4042a0a1a5fdSTejun Heo */ 404324b8a847STejun Heo for_each_pwq(pwq, wq) { 40446183c009STejun Heo WARN_ON_ONCE(pwq->nr_active < 0); 4045112202d9STejun Heo if (pwq->nr_active) { 4046a0a1a5fdSTejun Heo busy = true; 4047a0a1a5fdSTejun Heo goto out_unlock; 4048a0a1a5fdSTejun Heo } 4049a0a1a5fdSTejun Heo } 4050a0a1a5fdSTejun Heo } 4051a0a1a5fdSTejun Heo out_unlock: 4052e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 4053a0a1a5fdSTejun Heo return busy; 4054a0a1a5fdSTejun Heo } 4055a0a1a5fdSTejun Heo 4056a0a1a5fdSTejun Heo /** 4057a0a1a5fdSTejun Heo * thaw_workqueues - thaw workqueues 4058a0a1a5fdSTejun Heo * 4059a0a1a5fdSTejun Heo * Thaw workqueues. 
Normal queueing is restored and all collected 4060706026c2STejun Heo * frozen works are transferred to their respective pool worklists. 4061a0a1a5fdSTejun Heo * 4062a0a1a5fdSTejun Heo * CONTEXT: 4063d565ed63STejun Heo * Grabs and releases workqueue_lock and pool->lock's. 4064a0a1a5fdSTejun Heo */ 4065a0a1a5fdSTejun Heo void thaw_workqueues(void) 4066a0a1a5fdSTejun Heo { 406724b8a847STejun Heo struct workqueue_struct *wq; 406824b8a847STejun Heo struct pool_workqueue *pwq; 406924b8a847STejun Heo struct worker_pool *pool; 407024b8a847STejun Heo int id; 4071a0a1a5fdSTejun Heo 4072e98d5b16STejun Heo spin_lock_irq(&workqueue_lock); 4073a0a1a5fdSTejun Heo 4074a0a1a5fdSTejun Heo if (!workqueue_freezing) 4075a0a1a5fdSTejun Heo goto out_unlock; 4076a0a1a5fdSTejun Heo 407724b8a847STejun Heo /* clear FREEZING */ 407824b8a847STejun Heo for_each_pool(pool, id) { 4079e98d5b16STejun Heo spin_lock(&pool->lock); 408035b6bb63STejun Heo WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); 408135b6bb63STejun Heo pool->flags &= ~POOL_FREEZING; 4082e98d5b16STejun Heo spin_unlock(&pool->lock); 4083d565ed63STejun Heo } 408424b8a847STejun Heo 408524b8a847STejun Heo /* restore max_active and repopulate worklist */ 408624b8a847STejun Heo list_for_each_entry(wq, &workqueues, list) { 408724b8a847STejun Heo if (!(wq->flags & WQ_FREEZABLE)) 408824b8a847STejun Heo continue; 408924b8a847STejun Heo 409024b8a847STejun Heo for_each_pwq(pwq, wq) { 409124b8a847STejun Heo spin_lock(&pwq->pool->lock); 409224b8a847STejun Heo pwq_set_max_active(pwq, wq->saved_max_active); 409324b8a847STejun Heo spin_unlock(&pwq->pool->lock); 409424b8a847STejun Heo } 409524b8a847STejun Heo } 409624b8a847STejun Heo 409724b8a847STejun Heo /* kick workers */ 409824b8a847STejun Heo for_each_pool(pool, id) { 409924b8a847STejun Heo spin_lock(&pool->lock); 410024b8a847STejun Heo wake_up_worker(pool); 410124b8a847STejun Heo spin_unlock(&pool->lock); 4102a0a1a5fdSTejun Heo } 4103a0a1a5fdSTejun Heo 4104a0a1a5fdSTejun Heo workqueue_freezing = false; 4105a0a1a5fdSTejun Heo out_unlock: 4106e98d5b16STejun Heo spin_unlock_irq(&workqueue_lock); 4107a0a1a5fdSTejun Heo } 4108a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */ 4109a0a1a5fdSTejun Heo 41106ee0578bSSuresh Siddha static int __init init_workqueues(void) 41111da177e4SLinus Torvalds { 41127a4e344cSTejun Heo int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 41137a4e344cSTejun Heo int i, cpu; 4114c34056a3STejun Heo 41157c3eed5cSTejun Heo /* make sure we have enough bits for OFFQ pool ID */ 41167c3eed5cSTejun Heo BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) < 41176be19588SLai Jiangshan WORK_CPU_END * NR_STD_WORKER_POOLS); 4118b5490077STejun Heo 4119e904e6c2STejun Heo WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 4120e904e6c2STejun Heo 4121e904e6c2STejun Heo pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 4122e904e6c2STejun Heo 412365758202STejun Heo cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); 4124a5b4e57dSLai Jiangshan hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 41258b03ae3cSTejun Heo 4126706026c2STejun Heo /* initialize CPU pools */ 412729c91e99STejun Heo for_each_possible_cpu(cpu) { 41284ce62e9eSTejun Heo struct worker_pool *pool; 41298b03ae3cSTejun Heo 41307a4e344cSTejun Heo i = 0; 4131f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 41327a4e344cSTejun Heo BUG_ON(init_worker_pool(pool)); 4133ec22ca5eSTejun Heo pool->cpu = cpu; 41347a4e344cSTejun Heo cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 
41357a4e344cSTejun Heo pool->attrs->nice = std_nice[i++]; 41367a4e344cSTejun Heo 41379daf9e67STejun Heo /* alloc pool ID */ 41389daf9e67STejun Heo BUG_ON(worker_pool_assign_id(pool)); 41394ce62e9eSTejun Heo } 41408b03ae3cSTejun Heo } 41418b03ae3cSTejun Heo 4142e22bee78STejun Heo /* create the initial worker */ 414329c91e99STejun Heo for_each_online_cpu(cpu) { 41444ce62e9eSTejun Heo struct worker_pool *pool; 4145e22bee78STejun Heo 4146f02ae73aSTejun Heo for_each_cpu_worker_pool(pool, cpu) { 41474ce62e9eSTejun Heo struct worker *worker; 41484ce62e9eSTejun Heo 414924647570STejun Heo pool->flags &= ~POOL_DISASSOCIATED; 415024647570STejun Heo 4151bc2ae0f5STejun Heo worker = create_worker(pool); 4152e22bee78STejun Heo BUG_ON(!worker); 4153d565ed63STejun Heo spin_lock_irq(&pool->lock); 4154e22bee78STejun Heo start_worker(worker); 4155d565ed63STejun Heo spin_unlock_irq(&pool->lock); 4156e22bee78STejun Heo } 41574ce62e9eSTejun Heo } 4158e22bee78STejun Heo 415929c91e99STejun Heo /* create default unbound wq attrs */ 416029c91e99STejun Heo for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 416129c91e99STejun Heo struct workqueue_attrs *attrs; 416229c91e99STejun Heo 416329c91e99STejun Heo BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 416429c91e99STejun Heo 416529c91e99STejun Heo attrs->nice = std_nice[i]; 416629c91e99STejun Heo cpumask_setall(attrs->cpumask); 416729c91e99STejun Heo 416829c91e99STejun Heo unbound_std_wq_attrs[i] = attrs; 416929c91e99STejun Heo } 417029c91e99STejun Heo 4171d320c038STejun Heo system_wq = alloc_workqueue("events", 0, 0); 41721aabe902SJoonsoo Kim system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 4173d320c038STejun Heo system_long_wq = alloc_workqueue("events_long", 0, 0); 4174f3421797STejun Heo system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 4175f3421797STejun Heo WQ_UNBOUND_MAX_ACTIVE); 417624d51addSTejun Heo system_freezable_wq = alloc_workqueue("events_freezable", 417724d51addSTejun Heo WQ_FREEZABLE, 0); 41781aabe902SJoonsoo Kim BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 4179ae930e0fSTejun Heo !system_unbound_wq || !system_freezable_wq); 41806ee0578bSSuresh Siddha return 0; 41811da177e4SLinus Torvalds } 41826ee0578bSSuresh Siddha early_initcall(init_workqueues); 4183
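
/*
 * Example usage from a client's point of view (illustrative only, not
 * part of the original file; my_dev, my_work_fn and my_setup_fn are
 * hypothetical names):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *
 *		... runs in process context on a shared kworker ...
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	schedule_work(&dev->work);	... queues on system_wq ...
 *	flush_work(&dev->work);		... waits for it to finish ...
 *
 * To run a synchronous function on a particular CPU, work_on_cpu()
 * above wraps the same machinery:
 *
 *	ret = work_on_cpu(cpu, my_setup_fn, dev);
 */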