/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct global_cwq;
struct cpu_workqueue_struct;

struct worker {
	struct work_struct	*current_work;	/* L: work being processed */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct cpu_workqueue_struct *cwq;	/* I: the associated cwq */
	int			id;		/* I: worker id */
};

/*
 * Global per-cpu workqueue.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	struct ida		worker_ida;	/* L: for worker IDs */
} ____cacheline_aligned_in_smp;
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).  The lower WORK_STRUCT_FLAG_BITS of work_struct->data
 * are used for flags and thus cwqs need to be aligned at a two's power
 * of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct list_head	worklist;
	wait_queue_head_t	more_work;
	struct worker		*worker;
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};
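/*
 * Illustrative sketch (not part of the original file): because the
 * lower WORK_STRUCT_FLAG_BITS of work_struct->data hold flags, a cwq
 * pointer aligned to (1 << WORK_STRUCT_FLAG_BITS) can share the same
 * word with them.  Assuming eight flag bits purely for the example:
 *
 *	data  = (unsigned long)cwq | flags;	cwq 256-byte aligned
 *	cwq   = (void *)(data & ~255UL);	recover the pointer
 *	flags = data & 255UL;			recover the flags
 *
 * set_wq_data()/get_wq_data() below implement exactly this packing
 * via WORK_STRUCT_WQ_DATA_MASK.
 */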
/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	int			saved_max_active; /* I: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}
static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

static DEFINE_PER_CPU(struct global_cwq, global_cwq);

static int worker_thread(void *__worker);

static int singlethread_cpu __read_mostly;

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	return &per_cpu(global_cwq, cpu);
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
					       struct workqueue_struct *wq)
{
	if (unlikely(wq->flags & WQ_SINGLE_THREAD))
		cpu = singlethread_cpu;
	return get_cwq(cpu, wq);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
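/*
 * Worked example (illustrative, not part of the original file): colors
 * advance cyclically through [0, WORK_NR_COLORS), so with N colors:
 *
 *	work_next_color(0)     == 1
 *	work_next_color(N - 2) == N - 1
 *	work_next_color(N - 1) == 0
 *
 * flush_workqueue() below depends on this wrap-around when the color
 * space is exhausted; see the flusher_overflow handling.
 */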
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) &
			WORK_STRUCT_WQ_DATA_MASK);
}

/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
	struct global_cwq *gcwq = cwq->gcwq;
	struct list_head *worklist;
	unsigned long flags;

	debug_work_activate(work);

	spin_lock_irqsave(&gcwq->lock, flags);
	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &cwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
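/*
 * Usage sketch (illustrative, not part of the original file; the names
 * my_wq, my_work and my_work_fn are hypothetical):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work_fn runs in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);	returns 0 if already pending
 */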
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
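/*
 * Usage sketch (illustrative, not part of the original file; my_wq,
 * my_dwork and my_dwork_fn are hypothetical):
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	~1s from now
 *	queue_delayed_work_on(2, my_wq, &my_dwork, HZ);	on CPU 2
 *
 * The timer fires delayed_work_timer_fn() above, which requeues the
 * work via __queue_work() on the CPU the timer ran on.
 */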
static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker)
		INIT_LIST_HEAD(&worker->scheduled);
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @cwq: cwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @cwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
{
	struct global_cwq *gcwq = cwq->gcwq;
	int id = -1;
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->cwq = cwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      gcwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	if (bind)
		kthread_bind(worker->task, gcwq->cpu);

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Start @worker.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	wake_up_process(worker->task);
}
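/*
 * Lifecycle sketch (illustrative, not part of the original file):
 *
 *	worker = create_worker(cwq, true);	may sleep
 *	spin_lock_irq(&gcwq->lock);
 *	start_worker(worker);			wakes the kthread
 *	spin_unlock_irq(&gcwq->lock);
 *	...
 *	destroy_worker(worker);			stops and frees it
 */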
/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
	spin_unlock_irq(&gcwq->lock);
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->worklist, NULL);
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work has either completed or been removed from the pending queue;
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	/* one down, submit a delayed one */
	if (!list_empty(&cwq->delayed_works) &&
	    cwq->nr_active < cwq->max_active)
		cwq_activate_first_delayed(cwq);

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
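/*
 * Worked example (illustrative, not part of the original file): say a
 * flush begins while two works of color 3 are in flight on this cwq.
 * flush_workqueue_prep_cwqs() sets cwq->flush_color = 3 and bumps
 * wq->nr_cwqs_to_flush.  Each completion then calls
 * cwq_dec_nr_in_flight(cwq, 3), taking nr_in_flight[3] 2 -> 1 -> 0;
 * at zero the cwq resets flush_color to -1 and, if it was the last
 * cwq still flushing, completes first_flusher->done.
 */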
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = worker->cwq;
	struct global_cwq *gcwq = cwq->gcwq;
	work_func_t f = work->func;
	int work_color;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	worker->current_work = work;
	work_color = get_work_color(work);
	list_del_init(&work->entry);

	spin_unlock_irq(&gcwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* we're done with it, release */
	worker->current_work = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}
/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;
	struct cpu_workqueue_struct *cwq = worker->cwq;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (kthread_should_stop())
			break;

		if (unlikely(!cpumask_equal(&worker->task->cpus_allowed,
					    get_cpu_mask(gcwq->cpu))))
			set_cpus_allowed_ptr(worker->task,
					     get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);

		while (!list_empty(&cwq->worklist)) {
			struct work_struct *work =
				list_first_entry(&cwq->worklist,
						 struct work_struct, entry);

			if (likely(!(*work_data_bits(work) &
				     WORK_STRUCT_LINKED))) {
				/* optimization path, not strictly necessary */
				process_one_work(worker, work);
				if (unlikely(!list_empty(&worker->scheduled)))
					process_scheduled_works(worker);
			} else {
				move_linked_works(work, &worker->scheduled,
						  NULL);
				process_scheduled_works(worker);
			}
		}

		spin_unlock_irq(&gcwq->lock);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
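/*
 * Sketch (illustrative, not part of the original file) of how the
 * barrier below is used by flush_work():
 *
 *	struct wq_barrier barr;
 *
 *	spin_lock_irq(&gcwq->lock);
 *	insert_wq_barrier(cwq, &barr, work, worker);
 *	spin_unlock_irq(&gcwq->lock);
 *	wait_for_completion(&barr.done);	woken by wq_barrier_func()
 */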
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}

/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in-flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
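/*
 * Worked example (illustrative, not part of the original file),
 * assuming the current work color is 2:
 *
 *	flush_workqueue_prep_cwqs(wq, 2, 3)
 *		returns true:  some cwq had color-2 works in flight,
 *			       wakeup logic is armed, caller must wait
 *		returns false: nothing was in flight, flush already done
 *
 *	flush_workqueue_prep_cwqs(wq, -1, 3)
 *		only advances work_color to 3, never arms a wait
 */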
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct worker *worker = NULL;
	struct cpu_workqueue_struct *cwq;
	struct global_cwq *gcwq;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;
	gcwq = cwq->gcwq;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto already_gone;
	} else {
		if (cwq->worker && cwq->worker->current_work == work)
			worker = cwq->worker;
		if (!worker)
			goto already_gone;
	}

	insert_wq_barrier(cwq, &barr, work, worker);
	spin_unlock_irq(&gcwq->lock);
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);
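/*
 * Usage sketch (illustrative, not part of the original file; my_wq and
 * my_work are hypothetical), e.g. in a driver teardown path:
 *
 *	flush_work(&my_work);		wait for this one work item
 *	flush_workqueue(my_wq);		wait for everything queued so far
 *
 * Neither call stops the work from being requeued afterwards; use
 * cancel_work_sync() below for that.
 */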
11586e84d644SOleg Nesterov /*
11591f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
11606e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
11616e84d644SOleg Nesterov  */
11626e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
11636e84d644SOleg Nesterov {
11648b03ae3cSTejun Heo 	struct global_cwq *gcwq;
11656e84d644SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
11661f1f642eSOleg Nesterov 	int ret = -1;
11676e84d644SOleg Nesterov 
116822df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
11691f1f642eSOleg Nesterov 		return 0;
11706e84d644SOleg Nesterov 
11716e84d644SOleg Nesterov 	/*
11726e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
11736e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
11746e84d644SOleg Nesterov 	 */
11756e84d644SOleg Nesterov 
11766e84d644SOleg Nesterov 	cwq = get_wq_data(work);
11776e84d644SOleg Nesterov 	if (!cwq)
11786e84d644SOleg Nesterov 		return ret;
11798b03ae3cSTejun Heo 	gcwq = cwq->gcwq;
11806e84d644SOleg Nesterov 
11818b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
11826e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
11836e84d644SOleg Nesterov 		/*
11846e84d644SOleg Nesterov 		 * This work is queued, but perhaps we locked the wrong cwq.
11856e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
11866e84d644SOleg Nesterov 		 * insert_work()->wmb().
11876e84d644SOleg Nesterov 		 */
11886e84d644SOleg Nesterov 		smp_rmb();
11896e84d644SOleg Nesterov 		if (cwq == get_wq_data(work)) {
1190dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
11916e84d644SOleg Nesterov 			list_del_init(&work->entry);
119273f53c4aSTejun Heo 			cwq_dec_nr_in_flight(cwq, get_work_color(work));
11936e84d644SOleg Nesterov 			ret = 1;
11946e84d644SOleg Nesterov 		}
11956e84d644SOleg Nesterov 	}
11968b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
11976e84d644SOleg Nesterov 
11986e84d644SOleg Nesterov 	return ret;
11996e84d644SOleg Nesterov }
12006e84d644SOleg Nesterov 
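/*
 * The return values of try_to_grab_pending(), spelled out:
 *
 *	 0	the work was idle; the caller now owns PENDING
 *	 1	the work was queued; it has been taken off the list
 *		and the caller now owns PENDING
 *	-1	the work is PENDING but queueing is still in progress;
 *		the caller must retry
 *
 * __cancel_work_timer() below simply loops until it gets >= 0.
 */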
12016e84d644SOleg Nesterov static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
1202b89deed3SOleg Nesterov 				struct work_struct *work)
1203b89deed3SOleg Nesterov {
12048b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
1205b89deed3SOleg Nesterov 	struct wq_barrier barr;
1206affee4b2STejun Heo 	struct worker *worker;
1207b89deed3SOleg Nesterov 
12088b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1209affee4b2STejun Heo 
1210affee4b2STejun Heo 	worker = NULL;
1211c34056a3STejun Heo 	if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
1212affee4b2STejun Heo 		worker = cwq->worker;
1213affee4b2STejun Heo 		insert_wq_barrier(cwq, &barr, work, worker);
1214b89deed3SOleg Nesterov 	}
1215affee4b2STejun Heo 
12168b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1217b89deed3SOleg Nesterov 
1218affee4b2STejun Heo 	if (unlikely(worker)) {
1219b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
1220dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
1221dc186ad7SThomas Gleixner 	}
1222b89deed3SOleg Nesterov }
1223b89deed3SOleg Nesterov 
12246e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
1225b89deed3SOleg Nesterov {
1226b89deed3SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
122728e53bddSOleg Nesterov 	struct workqueue_struct *wq;
1228b1f4ec17SOleg Nesterov 	int cpu;
1229b89deed3SOleg Nesterov 
1230f293ea92SOleg Nesterov 	might_sleep();
1231f293ea92SOleg Nesterov 
12323295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
12333295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
12344e6045f1SJohannes Berg 
1235b89deed3SOleg Nesterov 	cwq = get_wq_data(work);
1236b89deed3SOleg Nesterov 	if (!cwq)
12373af24433SOleg Nesterov 		return;
1238b89deed3SOleg Nesterov 
123928e53bddSOleg Nesterov 	wq = cwq->wq;
124028e53bddSOleg Nesterov 
12411537663fSTejun Heo 	for_each_possible_cpu(cpu)
12424690c4abSTejun Heo 		wait_on_cpu_work(get_cwq(cpu, wq), work);
12436e84d644SOleg Nesterov }
12446e84d644SOleg Nesterov 
12451f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
12461f1f642eSOleg Nesterov 				struct timer_list *timer)
12471f1f642eSOleg Nesterov {
12481f1f642eSOleg Nesterov 	int ret;
12491f1f642eSOleg Nesterov 
12501f1f642eSOleg Nesterov 	do {
12511f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
12521f1f642eSOleg Nesterov 		if (!ret)
12531f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
12541f1f642eSOleg Nesterov 		wait_on_work(work);
12551f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
12561f1f642eSOleg Nesterov 
12574d707b9fSOleg Nesterov 	clear_wq_data(work);
12581f1f642eSOleg Nesterov 	return ret;
12591f1f642eSOleg Nesterov }
12601f1f642eSOleg Nesterov 
12616e84d644SOleg Nesterov /**
12626e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
12636e84d644SOleg Nesterov  * @work: the work to be cancelled
12646e84d644SOleg Nesterov  *
12651f1f642eSOleg Nesterov  * Returns true if @work was pending.
12661f1f642eSOleg Nesterov  *
12676e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
12686e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
12696e84d644SOleg Nesterov  * has completed.
12706e84d644SOleg Nesterov  *
12716e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
12726e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue, however in that
12736e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
12746e84d644SOleg Nesterov  * workqueue.
12756e84d644SOleg Nesterov  *
12766e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
12776e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
12786e84d644SOleg Nesterov  *
12796e84d644SOleg Nesterov  * The caller must ensure that the workqueue_struct on which this work was
12806e84d644SOleg Nesterov  * last queued can't be destroyed before this function returns.
12816e84d644SOleg Nesterov  */
12821f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
12836e84d644SOleg Nesterov {
12841f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
1285b89deed3SOleg Nesterov }
128628e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
1287b89deed3SOleg Nesterov 
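/*
 * cancel_work_sync() usage sketch (hypothetical names): tearing down
 * a self-requeueing work item.  No explicit "stop" flag is needed,
 * since the function both steals a pending incarnation and waits for
 * a running one, and, per the comment above, is safe against works
 * that requeue themselves.
 *
 *	static void mydrv_remove(struct mydrv *drv)
 *	{
 *		cancel_work_sync(&drv->poll_work);
 *		kfree(drv);
 *	}
 */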
12886e84d644SOleg Nesterov /**
1289f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
12906e84d644SOleg Nesterov  * @dwork: the delayed work struct
12916e84d644SOleg Nesterov  *
12921f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
12931f1f642eSOleg Nesterov  *
12946e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
12956e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
12966e84d644SOleg Nesterov  */
12971f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
12986e84d644SOleg Nesterov {
12991f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
13006e84d644SOleg Nesterov }
1301f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
13021da177e4SLinus Torvalds 
13036e84d644SOleg Nesterov static struct workqueue_struct *keventd_wq __read_mostly;
13041da177e4SLinus Torvalds 
13050fcb78c2SRolf Eike Beer /**
13060fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
13070fcb78c2SRolf Eike Beer  * @work: job to be done
13080fcb78c2SRolf Eike Beer  *
13095b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
13105b0f437dSBart Van Assche  * non-zero otherwise.
13115b0f437dSBart Van Assche  *
13125b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
13135b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
13145b0f437dSBart Van Assche  * workqueue otherwise.
13150fcb78c2SRolf Eike Beer  */
13167ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
13171da177e4SLinus Torvalds {
13181da177e4SLinus Torvalds 	return queue_work(keventd_wq, work);
13191da177e4SLinus Torvalds }
1320ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
13211da177e4SLinus Torvalds 
1322c1a220e7SZhang Rui /**
1323c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
1324c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
1325c1a220e7SZhang Rui  * @work: job to be done
1326c1a220e7SZhang Rui  *
1327c1a220e7SZhang Rui  * This puts a job on a specific cpu.
1328c1a220e7SZhang Rui  */
1329c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
1330c1a220e7SZhang Rui {
1331c1a220e7SZhang Rui 	return queue_work_on(cpu, keventd_wq, work);
1332c1a220e7SZhang Rui }
1333c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
1334c1a220e7SZhang Rui 
13350fcb78c2SRolf Eike Beer /**
13360fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
133752bad64dSDavid Howells  * @dwork: job to be done
133852bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
13390fcb78c2SRolf Eike Beer  *
13400fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
13410fcb78c2SRolf Eike Beer  * workqueue.
13420fcb78c2SRolf Eike Beer  */
13437ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
134482f67cd9SIngo Molnar 					unsigned long delay)
13451da177e4SLinus Torvalds {
134652bad64dSDavid Howells 	return queue_delayed_work(keventd_wq, dwork, delay);
13471da177e4SLinus Torvalds }
1348ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
13491da177e4SLinus Torvalds 
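/*
 * schedule_work() usage sketch (hypothetical names): the classic
 * bottom-half pattern, where an interrupt handler defers the part of
 * its job that may sleep to the kernel-global workqueue.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv *drv = data;
 *
 *		schedule_work(&drv->rx_work);
 *		return IRQ_HANDLED;
 *	}
 *
 * Queueing is legal from atomic context; only the work function
 * itself runs in process context.
 */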
13500fcb78c2SRolf Eike Beer /**
13518c53e463SLinus Torvalds  * flush_delayed_work - block until a delayed_work's callback has terminated
13528c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
13538c53e463SLinus Torvalds  *
13548c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
13558c53e463SLinus Torvalds  */
13568c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
13578c53e463SLinus Torvalds {
13588c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
13594690c4abSTejun Heo 		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
13604690c4abSTejun Heo 			     &dwork->work);
13618c53e463SLinus Torvalds 		put_cpu();
13628c53e463SLinus Torvalds 	}
13638c53e463SLinus Torvalds 	flush_work(&dwork->work);
13648c53e463SLinus Torvalds }
13658c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
13668c53e463SLinus Torvalds 
13678c53e463SLinus Torvalds /**
13680fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
13690fcb78c2SRolf Eike Beer  * @cpu: cpu to use
137052bad64dSDavid Howells  * @dwork: job to be done
13710fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
13720fcb78c2SRolf Eike Beer  *
13730fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
13740fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
13750fcb78c2SRolf Eike Beer  */
13761da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
137752bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
13781da177e4SLinus Torvalds {
137952bad64dSDavid Howells 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
13801da177e4SLinus Torvalds }
1381ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
13821da177e4SLinus Torvalds 
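/*
 * Delayed work usage sketch (hypothetical names): a poll routine that
 * rearms itself once a second, pinned to CPU 0 purely to illustrate
 * schedule_delayed_work_on().
 *
 *	static void mydrv_poll(struct work_struct *work)
 *	{
 *		struct mydrv *drv = container_of(work, struct mydrv,
 *						 poll_work.work);
 *
 *		mydrv_sample(drv);
 *		schedule_delayed_work_on(0, &drv->poll_work, HZ);
 *	}
 *
 * A suspend path could call flush_delayed_work(&drv->poll_work) to
 * fire any pending timeout early and wait for that instance to finish.
 */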
1383b6136773SAndrew Morton /**
1384b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
1385b6136773SAndrew Morton  * @func: the function to call
1386b6136773SAndrew Morton  *
1387b6136773SAndrew Morton  * Returns zero on success.
1388b6136773SAndrew Morton  * Returns -ve errno on failure.
1389b6136773SAndrew Morton  *
1390b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
1391b6136773SAndrew Morton  */
139265f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
139315316ba8SChristoph Lameter {
139415316ba8SChristoph Lameter 	int cpu;
139565a64464SAndi Kleen 	int orig = -1;
1396b6136773SAndrew Morton 	struct work_struct *works;
139715316ba8SChristoph Lameter 
1398b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
1399b6136773SAndrew Morton 	if (!works)
140015316ba8SChristoph Lameter 		return -ENOMEM;
1401b6136773SAndrew Morton 
140295402b38SGautham R Shenoy 	get_online_cpus();
140393981800STejun Heo 
140493981800STejun Heo 	/*
140593981800STejun Heo 	 * When running in keventd don't schedule a work item on
140693981800STejun Heo 	 * itself.  We can call the function directly because the
140793981800STejun Heo 	 * work queue is already bound; this is also faster.
140893981800STejun Heo 	 */
140993981800STejun Heo 	if (current_is_keventd())
141093981800STejun Heo 		orig = raw_smp_processor_id();
141193981800STejun Heo 
141215316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
14139bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
14149bfb1839SIngo Molnar 
14159bfb1839SIngo Molnar 		INIT_WORK(work, func);
141693981800STejun Heo 		if (cpu != orig)
14178de6d308SOleg Nesterov 			schedule_work_on(cpu, work);
141815316ba8SChristoph Lameter 	}
141993981800STejun Heo 	if (orig >= 0)
142093981800STejun Heo 		func(per_cpu_ptr(works, orig));
142193981800STejun Heo 
142293981800STejun Heo 	for_each_online_cpu(cpu)
14238616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
142493981800STejun Heo 
142595402b38SGautham R Shenoy 	put_online_cpus();
1426b6136773SAndrew Morton 	free_percpu(works);
142715316ba8SChristoph Lameter 	return 0;
142815316ba8SChristoph Lameter }
142915316ba8SChristoph Lameter 
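/*
 * schedule_on_each_cpu() usage sketch: synchronously run a function
 * on every online CPU, e.g. to drain per-cpu caches ("mydrv_drain"
 * is a hypothetical work function).
 *
 *	static void mydrv_drain(struct work_struct *unused)
 *	{
 *		mydrv_drain_this_cpu_cache();
 *	}
 *
 *	int ret = schedule_on_each_cpu(mydrv_drain);
 *
 * The caller sleeps until every CPU has run the function, which is
 * why the comment above warns that it is very slow.
 */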
1430eef6a7d5SAlan Stern /**
1431eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
1432eef6a7d5SAlan Stern  *
1433eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
1434eef6a7d5SAlan Stern  * completion.
1435eef6a7d5SAlan Stern  *
1436eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
1437eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
1438eef6a7d5SAlan Stern  * will lead to deadlock:
1439eef6a7d5SAlan Stern  *
1440eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
1441eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
1442eef6a7d5SAlan Stern  *
1443eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
1444eef6a7d5SAlan Stern  *
1445eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
1446eef6a7d5SAlan Stern  * occur very often.  It depends on what work items are on the workqueue and
1447eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
1448eef6a7d5SAlan Stern  *
1449eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
1450eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
1451eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
1452eef6a7d5SAlan Stern  * cancel_work_sync() instead.
1453eef6a7d5SAlan Stern  */
14541da177e4SLinus Torvalds void flush_scheduled_work(void)
14551da177e4SLinus Torvalds {
14561da177e4SLinus Torvalds 	flush_workqueue(keventd_wq);
14571da177e4SLinus Torvalds }
1458ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
14591da177e4SLinus Torvalds 
14601da177e4SLinus Torvalds /**
14611fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
14621fa44ecaSJames Bottomley  * @fn:		the function to execute
14631fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
14641fa44ecaSJames Bottomley  *		be available when the work executes)
14651fa44ecaSJames Bottomley  *
14661fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
14671fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
14681fa44ecaSJames Bottomley  *
14691fa44ecaSJames Bottomley  * Returns:	0 - function was executed
14701fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
14711fa44ecaSJames Bottomley  */
147265f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
14731fa44ecaSJames Bottomley {
14741fa44ecaSJames Bottomley 	if (!in_interrupt()) {
147565f27f38SDavid Howells 		fn(&ew->work);
14761fa44ecaSJames Bottomley 		return 0;
14771fa44ecaSJames Bottomley 	}
14781fa44ecaSJames Bottomley 
147965f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
14801fa44ecaSJames Bottomley 	schedule_work(&ew->work);
14811fa44ecaSJames Bottomley 
14821fa44ecaSJames Bottomley 	return 1;
14831fa44ecaSJames Bottomley }
14841fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
14851fa44ecaSJames Bottomley 
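/*
 * execute_in_process_context() usage sketch (hypothetical names): a
 * release path that can be entered from either process or interrupt
 * context.  The execute_work storage must stay valid until the
 * possibly deferred execution happens, so it is embedded in the
 * object being freed.
 *
 *	struct mydev {
 *		atomic_t refcnt;
 *		struct execute_work free_ew;
 *	};
 *
 *	static void mydev_free(struct work_struct *work)
 *	{
 *		kfree(container_of(work, struct mydev, free_ew.work));
 *	}
 *
 *	static void mydev_put(struct mydev *dev)
 *	{
 *		if (atomic_dec_and_test(&dev->refcnt))
 *			execute_in_process_context(mydev_free, &dev->free_ew);
 *	}
 */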
14861da177e4SLinus Torvalds int keventd_up(void)
14871da177e4SLinus Torvalds {
14881da177e4SLinus Torvalds 	return keventd_wq != NULL;
14891da177e4SLinus Torvalds }
14901da177e4SLinus Torvalds 
14911da177e4SLinus Torvalds int current_is_keventd(void)
14921da177e4SLinus Torvalds {
14931da177e4SLinus Torvalds 	struct cpu_workqueue_struct *cwq;
1494d243769dSHugh Dickins 	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
14951da177e4SLinus Torvalds 	int ret = 0;
14961da177e4SLinus Torvalds 
14971da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
14981da177e4SLinus Torvalds 
14991537663fSTejun Heo 	cwq = get_cwq(cpu, keventd_wq);
1500c34056a3STejun Heo 	if (current == cwq->worker->task)
15011da177e4SLinus Torvalds 		ret = 1;
15021da177e4SLinus Torvalds 
15031da177e4SLinus Torvalds 	return ret;
15041da177e4SLinus Torvalds 
15051da177e4SLinus Torvalds }
15061da177e4SLinus Torvalds 
15070f900049STejun Heo static struct cpu_workqueue_struct *alloc_cwqs(void)
15080f900049STejun Heo {
15090f900049STejun Heo 	/*
15100f900049STejun Heo 	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
15110f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
15120f900049STejun Heo 	 * unsigned long long.
15130f900049STejun Heo 	 */
15140f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
15150f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
15160f900049STejun Heo 				   __alignof__(unsigned long long));
15170f900049STejun Heo 	struct cpu_workqueue_struct *cwqs;
15180f900049STejun Heo #ifndef CONFIG_SMP
15190f900049STejun Heo 	void *ptr;
15200f900049STejun Heo 
15210f900049STejun Heo 	/*
15220f900049STejun Heo 	 * On UP, percpu allocator doesn't honor alignment parameter
15230f900049STejun Heo 	 * and simply uses arch-dependent default.  Allocate enough
15240f900049STejun Heo 	 * room to align cwq and put an extra pointer at the end
15250f900049STejun Heo 	 * pointing back to the originally allocated pointer which
15260f900049STejun Heo 	 * will be used for free.
15270f900049STejun Heo 	 *
15280f900049STejun Heo 	 * FIXME: This really belongs to UP percpu code.  Update UP
15290f900049STejun Heo 	 * percpu code to honor alignment and remove this ugliness.
15300f900049STejun Heo 	 */
15310f900049STejun Heo 	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
15320f900049STejun Heo 	cwqs = PTR_ALIGN(ptr, align);
15330f900049STejun Heo 	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
15340f900049STejun Heo #else
15350f900049STejun Heo 	/* On SMP, percpu allocator can do it itself */
15360f900049STejun Heo 	cwqs = __alloc_percpu(size, align);
15370f900049STejun Heo #endif
15380f900049STejun Heo 	/* just in case, make sure it's actually aligned */
15390f900049STejun Heo 	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
15400f900049STejun Heo 	return cwqs;
15410f900049STejun Heo }
15420f900049STejun Heo 
15430f900049STejun Heo static void free_cwqs(struct cpu_workqueue_struct *cwqs)
15440f900049STejun Heo {
15450f900049STejun Heo #ifndef CONFIG_SMP
15460f900049STejun Heo 	/* on UP, the pointer to free is stored right after the cwq */
15470f900049STejun Heo 	if (cwqs)
15480f900049STejun Heo 		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
15490f900049STejun Heo #else
15500f900049STejun Heo 	free_percpu(cwqs);
15510f900049STejun Heo #endif
15520f900049STejun Heo }
15530f900049STejun Heo 
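/*
 * Why the forced alignment above: the cwq pointer of an on-queue work
 * is stashed in work_struct->data with the low WORK_STRUCT_FLAG_BITS
 * reserved for flags.  Aligning every cwq to 1 << WORK_STRUCT_FLAG_BITS
 * guarantees that those low pointer bits are zero and thus free to
 * carry the flags; the BUG_ON against WORK_STRUCT_FLAG_MASK in
 * __create_workqueue_key() below checks exactly that.
 */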
15544e6045f1SJohannes Berg struct workqueue_struct *__create_workqueue_key(const char *name,
155597e37d7bSTejun Heo 						unsigned int flags,
15561e19ffc6STejun Heo 						int max_active,
1557eb13ba87SJohannes Berg 						struct lock_class_key *key,
1558eb13ba87SJohannes Berg 						const char *lock_name)
15593af24433SOleg Nesterov {
15601537663fSTejun Heo 	bool singlethread = flags & WQ_SINGLE_THREAD;
15613af24433SOleg Nesterov 	struct workqueue_struct *wq;
1562c34056a3STejun Heo 	bool failed = false;
1563c34056a3STejun Heo 	unsigned int cpu;
15643af24433SOleg Nesterov 
15651e19ffc6STejun Heo 	max_active = clamp_val(max_active, 1, INT_MAX);
15661e19ffc6STejun Heo 
15673af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
15683af24433SOleg Nesterov 	if (!wq)
15694690c4abSTejun Heo 		goto err;
15703af24433SOleg Nesterov 
15710f900049STejun Heo 	wq->cpu_wq = alloc_cwqs();
15724690c4abSTejun Heo 	if (!wq->cpu_wq)
15734690c4abSTejun Heo 		goto err;
15743af24433SOleg Nesterov 
157597e37d7bSTejun Heo 	wq->flags = flags;
1576a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
157773f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
157873f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
157973f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
158073f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
15813af24433SOleg Nesterov 	wq->name = name;
1582eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1583cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
15843af24433SOleg Nesterov 
15853da1c84cSOleg Nesterov 	cpu_maps_update_begin();
15866af8bf3dSOleg Nesterov 	/*
15876af8bf3dSOleg Nesterov 	 * We must initialize cwqs for each possible cpu even if we
15886af8bf3dSOleg Nesterov 	 * are going to call destroy_workqueue() finally. Otherwise
15896af8bf3dSOleg Nesterov 	 * cpu_up() can hit the uninitialized cwq once we drop the
15906af8bf3dSOleg Nesterov 	 * lock.
15916af8bf3dSOleg Nesterov 	 */
15923af24433SOleg Nesterov 	for_each_possible_cpu(cpu) {
15931537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
15948b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
15951537663fSTejun Heo 
15960f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
15978b03ae3cSTejun Heo 		cwq->gcwq = gcwq;
1598c34056a3STejun Heo 		cwq->wq = wq;
159973f53c4aSTejun Heo 		cwq->flush_color = -1;
16001e19ffc6STejun Heo 		cwq->max_active = max_active;
16011537663fSTejun Heo 		INIT_LIST_HEAD(&cwq->worklist);
16021e19ffc6STejun Heo 		INIT_LIST_HEAD(&cwq->delayed_works);
16031537663fSTejun Heo 		init_waitqueue_head(&cwq->more_work);
16041537663fSTejun Heo 
1605c34056a3STejun Heo 		if (failed)
16063af24433SOleg Nesterov 			continue;
1607c34056a3STejun Heo 		cwq->worker = create_worker(cwq,
1608c34056a3STejun Heo 					    cpu_online(cpu) && !singlethread);
1609c34056a3STejun Heo 		if (cwq->worker)
1610c34056a3STejun Heo 			start_worker(cwq->worker);
16111537663fSTejun Heo 		else
1612c34056a3STejun Heo 			failed = true;
16133af24433SOleg Nesterov 	}
16141537663fSTejun Heo 
1615a0a1a5fdSTejun Heo 	/*
1616a0a1a5fdSTejun Heo 	 * workqueue_lock protects global freeze state and workqueues
1617a0a1a5fdSTejun Heo 	 * list.  Grab it, set max_active accordingly and add the new
1618a0a1a5fdSTejun Heo 	 * workqueue to workqueues list.
1619a0a1a5fdSTejun Heo 	 */
16201537663fSTejun Heo 	spin_lock(&workqueue_lock);
1621a0a1a5fdSTejun Heo 
1622a0a1a5fdSTejun Heo 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
1623a0a1a5fdSTejun Heo 		for_each_possible_cpu(cpu)
1624a0a1a5fdSTejun Heo 			get_cwq(cpu, wq)->max_active = 0;
1625a0a1a5fdSTejun Heo 
16261537663fSTejun Heo 	list_add(&wq->list, &workqueues);
1627a0a1a5fdSTejun Heo 
16281537663fSTejun Heo 	spin_unlock(&workqueue_lock);
16291537663fSTejun Heo 
16303da1c84cSOleg Nesterov 	cpu_maps_update_done();
16313af24433SOleg Nesterov 
1632c34056a3STejun Heo 	if (failed) {
16333af24433SOleg Nesterov 		destroy_workqueue(wq);
16343af24433SOleg Nesterov 		wq = NULL;
16353af24433SOleg Nesterov 	}
16363af24433SOleg Nesterov 	return wq;
16374690c4abSTejun Heo err:
16384690c4abSTejun Heo 	if (wq) {
16390f900049STejun Heo 		free_cwqs(wq->cpu_wq);
16404690c4abSTejun Heo 		kfree(wq);
16414690c4abSTejun Heo 	}
16424690c4abSTejun Heo 	return NULL;
16433af24433SOleg Nesterov }
16444e6045f1SJohannes Berg EXPORT_SYMBOL_GPL(__create_workqueue_key);
16453af24433SOleg Nesterov 
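/*
 * Callers normally reach __create_workqueue_key() through the wrapper
 * macros in include/linux/workqueue.h, which supply the flags,
 * max_active and lockdep arguments.  A typical pairing ("mydrv" is a
 * hypothetical name):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_singlethread_workqueue("mydrv");
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */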
16463af24433SOleg Nesterov /**
16473af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
16483af24433SOleg Nesterov  * @wq: target workqueue
16493af24433SOleg Nesterov  *
16503af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
16513af24433SOleg Nesterov  */
16523af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
16533af24433SOleg Nesterov {
16543af24433SOleg Nesterov 	int cpu;
16553af24433SOleg Nesterov 
1656a0a1a5fdSTejun Heo 	flush_workqueue(wq);
1657a0a1a5fdSTejun Heo 
1658a0a1a5fdSTejun Heo 	/*
1659a0a1a5fdSTejun Heo 	 * wq list is used to freeze wq, remove from list after
1660a0a1a5fdSTejun Heo 	 * flushing is complete in case freeze races us.
1661a0a1a5fdSTejun Heo 	 */
16623da1c84cSOleg Nesterov 	cpu_maps_update_begin();
166395402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
16643af24433SOleg Nesterov 	list_del(&wq->list);
166595402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
16663da1c84cSOleg Nesterov 	cpu_maps_update_done();
16673af24433SOleg Nesterov 
166873f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
166973f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
167073f53c4aSTejun Heo 		int i;
167173f53c4aSTejun Heo 
1672c34056a3STejun Heo 		if (cwq->worker) {
1673c34056a3STejun Heo 			destroy_worker(cwq->worker);
1674c34056a3STejun Heo 			cwq->worker = NULL;
167573f53c4aSTejun Heo 		}
167673f53c4aSTejun Heo 
167773f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
167873f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
16791e19ffc6STejun Heo 		BUG_ON(cwq->nr_active);
16801e19ffc6STejun Heo 		BUG_ON(!list_empty(&cwq->delayed_works));
168173f53c4aSTejun Heo 	}
16821537663fSTejun Heo 
16830f900049STejun Heo 	free_cwqs(wq->cpu_wq);
16843af24433SOleg Nesterov 	kfree(wq);
16853af24433SOleg Nesterov }
16863af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
16873af24433SOleg Nesterov 
16889c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
16891da177e4SLinus Torvalds 						unsigned long action,
16901da177e4SLinus Torvalds 						void *hcpu)
16911da177e4SLinus Torvalds {
16923af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
16933af24433SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
16941da177e4SLinus Torvalds 	struct workqueue_struct *wq;
16951da177e4SLinus Torvalds 
16968bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
16978bb78442SRafael J. Wysocki 
16981da177e4SLinus Torvalds 	list_for_each_entry(wq, &workqueues, list) {
16991537663fSTejun Heo 		if (wq->flags & WQ_SINGLE_THREAD)
17001537663fSTejun Heo 			continue;
17011537663fSTejun Heo 
17021537663fSTejun Heo 		cwq = get_cwq(cpu, wq);
17033af24433SOleg Nesterov 
17043af24433SOleg Nesterov 		switch (action) {
17053da1c84cSOleg Nesterov 		case CPU_POST_DEAD:
170673f53c4aSTejun Heo 			flush_workqueue(wq);
17071da177e4SLinus Torvalds 			break;
17081da177e4SLinus Torvalds 		}
17093af24433SOleg Nesterov 	}
17101da177e4SLinus Torvalds 
17111537663fSTejun Heo 	return notifier_from_errno(0);
17121da177e4SLinus Torvalds }
17131da177e4SLinus Torvalds 
17142d3854a3SRusty Russell #ifdef CONFIG_SMP
17158ccad40dSRusty Russell 
17162d3854a3SRusty Russell struct work_for_cpu {
17176b44003eSAndrew Morton 	struct completion completion;
17182d3854a3SRusty Russell 	long (*fn)(void *);
17192d3854a3SRusty Russell 	void *arg;
17202d3854a3SRusty Russell 	long ret;
17212d3854a3SRusty Russell };
17222d3854a3SRusty Russell 
17236b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
17242d3854a3SRusty Russell {
17256b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
17262d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
17276b44003eSAndrew Morton 	complete(&wfc->completion);
17286b44003eSAndrew Morton 	return 0;
17292d3854a3SRusty Russell }
17302d3854a3SRusty Russell 
17312d3854a3SRusty Russell /**
17322d3854a3SRusty Russell  * work_on_cpu - run a function in user context on a particular cpu
17332d3854a3SRusty Russell  * @cpu: the cpu to run on
17342d3854a3SRusty Russell  * @fn: the function to run
17352d3854a3SRusty Russell  * @arg: the function arg
17362d3854a3SRusty Russell  *
173731ad9081SRusty Russell  * This will return the value @fn returns.
173831ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
17396b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
17402d3854a3SRusty Russell  */
17412d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
17422d3854a3SRusty Russell {
17436b44003eSAndrew Morton 	struct task_struct *sub_thread;
17446b44003eSAndrew Morton 	struct work_for_cpu wfc = {
17456b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
17466b44003eSAndrew Morton 		.fn = fn,
17476b44003eSAndrew Morton 		.arg = arg,
17486b44003eSAndrew Morton 	};
17492d3854a3SRusty Russell 
17506b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
17516b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
17526b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
17536b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
17546b44003eSAndrew Morton 	wake_up_process(sub_thread);
17556b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
17562d3854a3SRusty Russell 	return wfc.ret;
17572d3854a3SRusty Russell }
17582d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
17592d3854a3SRusty Russell #endif /* CONFIG_SMP */
17602d3854a3SRusty Russell 
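/*
 * work_on_cpu() usage sketch (hypothetical names): synchronously run
 * a function that must execute on one particular CPU, from a context
 * that may sleep.
 *
 *	static long mydrv_read_local_reg(void *arg)
 *	{
 *		return mydrv_read_this_cpu_reg(arg);
 *	}
 *
 *	ret = work_on_cpu(target_cpu, mydrv_read_local_reg, drv);
 *
 * As the comment above notes, the caller must keep target_cpu online
 * for the duration, e.g. by wrapping the call in get_online_cpus()/
 * put_online_cpus().
 */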
1761a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER
1762a0a1a5fdSTejun Heo 
1763a0a1a5fdSTejun Heo /**
1764a0a1a5fdSTejun Heo  * freeze_workqueues_begin - begin freezing workqueues
1765a0a1a5fdSTejun Heo  *
1766a0a1a5fdSTejun Heo  * Start freezing workqueues.  After this function returns, all
1767a0a1a5fdSTejun Heo  * freezeable workqueues will queue new works to their frozen_works
1768a0a1a5fdSTejun Heo  * list instead of the cwq ones.
1769a0a1a5fdSTejun Heo  *
1770a0a1a5fdSTejun Heo  * CONTEXT:
17718b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
1772a0a1a5fdSTejun Heo  */
1773a0a1a5fdSTejun Heo void freeze_workqueues_begin(void)
1774a0a1a5fdSTejun Heo {
1775a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
1776a0a1a5fdSTejun Heo 	unsigned int cpu;
1777a0a1a5fdSTejun Heo 
1778a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
1779a0a1a5fdSTejun Heo 
1780a0a1a5fdSTejun Heo 	BUG_ON(workqueue_freezing);
1781a0a1a5fdSTejun Heo 	workqueue_freezing = true;
1782a0a1a5fdSTejun Heo 
1783a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
17848b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
17858b03ae3cSTejun Heo 
17868b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
17878b03ae3cSTejun Heo 
1788a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
1789a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1790a0a1a5fdSTejun Heo 
1791a0a1a5fdSTejun Heo 			if (wq->flags & WQ_FREEZEABLE)
1792a0a1a5fdSTejun Heo 				cwq->max_active = 0;
1793a0a1a5fdSTejun Heo 		}
17948b03ae3cSTejun Heo 
17958b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1796a0a1a5fdSTejun Heo 	}
1797a0a1a5fdSTejun Heo 
1798a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
1799a0a1a5fdSTejun Heo }
1800a0a1a5fdSTejun Heo 
1801a0a1a5fdSTejun Heo /**
1802a0a1a5fdSTejun Heo  * freeze_workqueues_busy - are freezeable workqueues still busy?
1803a0a1a5fdSTejun Heo  *
1804a0a1a5fdSTejun Heo  * Check whether freezing is complete.  This function must be called
1805a0a1a5fdSTejun Heo  * between freeze_workqueues_begin() and thaw_workqueues().
1806a0a1a5fdSTejun Heo  *
1807a0a1a5fdSTejun Heo  * CONTEXT:
1808a0a1a5fdSTejun Heo  * Grabs and releases workqueue_lock.
1809a0a1a5fdSTejun Heo  *
1810a0a1a5fdSTejun Heo  * RETURNS:
1811a0a1a5fdSTejun Heo  * %true if some freezeable workqueues are still busy.  %false if
1812a0a1a5fdSTejun Heo  * freezing is complete.
1813a0a1a5fdSTejun Heo  */
1814a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void)
1815a0a1a5fdSTejun Heo {
1816a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
1817a0a1a5fdSTejun Heo 	unsigned int cpu;
1818a0a1a5fdSTejun Heo 	bool busy = false;
1819a0a1a5fdSTejun Heo 
1820a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
1821a0a1a5fdSTejun Heo 
1822a0a1a5fdSTejun Heo 	BUG_ON(!workqueue_freezing);
1823a0a1a5fdSTejun Heo 
1824a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
1825a0a1a5fdSTejun Heo 		/*
1826a0a1a5fdSTejun Heo 		 * nr_active is monotonically decreasing.  It's safe
1827a0a1a5fdSTejun Heo 		 * to peek without lock.
1828a0a1a5fdSTejun Heo 		 */
1829a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
1830a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1831a0a1a5fdSTejun Heo 
1832a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
1833a0a1a5fdSTejun Heo 				continue;
1834a0a1a5fdSTejun Heo 
1835a0a1a5fdSTejun Heo 			BUG_ON(cwq->nr_active < 0);
1836a0a1a5fdSTejun Heo 			if (cwq->nr_active) {
1837a0a1a5fdSTejun Heo 				busy = true;
1838a0a1a5fdSTejun Heo 				goto out_unlock;
1839a0a1a5fdSTejun Heo 			}
1840a0a1a5fdSTejun Heo 		}
1841a0a1a5fdSTejun Heo 	}
1842a0a1a5fdSTejun Heo out_unlock:
1843a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
1844a0a1a5fdSTejun Heo 	return busy;
1845a0a1a5fdSTejun Heo }
1846a0a1a5fdSTejun Heo 
1847a0a1a5fdSTejun Heo /**
1848a0a1a5fdSTejun Heo  * thaw_workqueues - thaw workqueues
1849a0a1a5fdSTejun Heo  *
1850a0a1a5fdSTejun Heo  * Thaw workqueues.  Normal queueing is restored and all collected
1851a0a1a5fdSTejun Heo  * frozen works are transferred to their respective cwq worklists.
1852a0a1a5fdSTejun Heo  *
1853a0a1a5fdSTejun Heo  * CONTEXT:
18548b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
1855a0a1a5fdSTejun Heo  */
1856a0a1a5fdSTejun Heo void thaw_workqueues(void)
1857a0a1a5fdSTejun Heo {
1858a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
1859a0a1a5fdSTejun Heo 	unsigned int cpu;
1860a0a1a5fdSTejun Heo 
1861a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
1862a0a1a5fdSTejun Heo 
1863a0a1a5fdSTejun Heo 	if (!workqueue_freezing)
1864a0a1a5fdSTejun Heo 		goto out_unlock;
1865a0a1a5fdSTejun Heo 
1866a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
18678b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
18688b03ae3cSTejun Heo 
18698b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
18708b03ae3cSTejun Heo 
1871a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
1872a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1873a0a1a5fdSTejun Heo 
1874a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
1875a0a1a5fdSTejun Heo 				continue;
1876a0a1a5fdSTejun Heo 
1877a0a1a5fdSTejun Heo 			/* restore max_active and repopulate worklist */
1878a0a1a5fdSTejun Heo 			cwq->max_active = wq->saved_max_active;
1879a0a1a5fdSTejun Heo 
1880a0a1a5fdSTejun Heo 			while (!list_empty(&cwq->delayed_works) &&
1881a0a1a5fdSTejun Heo 			       cwq->nr_active < cwq->max_active)
1882a0a1a5fdSTejun Heo 				cwq_activate_first_delayed(cwq);
1883a0a1a5fdSTejun Heo 
1884a0a1a5fdSTejun Heo 			wake_up(&cwq->more_work);
1885a0a1a5fdSTejun Heo 		}
18868b03ae3cSTejun Heo 
18878b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1888a0a1a5fdSTejun Heo 	}
1889a0a1a5fdSTejun Heo 
1890a0a1a5fdSTejun Heo 	workqueue_freezing = false;
1891a0a1a5fdSTejun Heo out_unlock:
1892a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
1893a0a1a5fdSTejun Heo }
1894a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */
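/*
 * Sketch of how the suspend path is expected to drive the three
 * functions above (simplified; not a verbatim copy of the PM code):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...create the system image...
 *	thaw_workqueues();
 *
 * A real caller would bound the wait and abort the freeze instead of
 * looping forever if a freezeable workqueue never drains.
 */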
1895a0a1a5fdSTejun Heo 
1896c12920d1SOleg Nesterov void __init init_workqueues(void)
18971da177e4SLinus Torvalds {
1898c34056a3STejun Heo 	unsigned int cpu;
1899c34056a3STejun Heo 
1900e7577c50SRusty Russell 	singlethread_cpu = cpumask_first(cpu_possible_mask);
19011da177e4SLinus Torvalds 	hotcpu_notifier(workqueue_cpu_callback, 0);
19028b03ae3cSTejun Heo 
19038b03ae3cSTejun Heo 	/* initialize gcwqs */
19048b03ae3cSTejun Heo 	for_each_possible_cpu(cpu) {
19058b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
19068b03ae3cSTejun Heo 
19078b03ae3cSTejun Heo 		spin_lock_init(&gcwq->lock);
19088b03ae3cSTejun Heo 		gcwq->cpu = cpu;
19098b03ae3cSTejun Heo 
19108b03ae3cSTejun Heo 		ida_init(&gcwq->worker_ida);
19118b03ae3cSTejun Heo 	}
19128b03ae3cSTejun Heo 
19131da177e4SLinus Torvalds 	keventd_wq = create_workqueue("events");
19141da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
19151da177e4SLinus Torvalds }