/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

enum {
	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct global_cwq;
struct cpu_workqueue_struct;

struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct cpu_workqueue_struct *cwq;	/* I: the associated cwq */
	unsigned int		flags;		/* L: flags */
	int			id;		/* I: worker id */
};

/*
 * Global per-cpu workqueue.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* L: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct ida		worker_ida;	/* L: for worker IDs */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags, so cwqs need to be aligned
 * to a power of two no smaller than 1 << WORK_STRUCT_FLAG_BITS.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct list_head	worklist;
	struct worker		*worker;
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	int			saved_max_active; /* I: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized.
		 * We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
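
/*
 * A minimal usage sketch for the on-stack work helpers above (not part
 * of this file; my_stack_func and my_wait are hypothetical caller-side
 * names):
 *
 *	static void my_stack_func(struct work_struct *work)
 *	{
 *		...
 *	}
 *
 *	void my_wait(struct workqueue_struct *wq)
 *	{
 *		struct work_struct work;
 *
 *		INIT_WORK_ON_STACK(&work, my_stack_func);
 *		queue_work(wq, &work);
 *		flush_workqueue(wq);
 *		destroy_work_on_stack(&work);
 *	}
 *
 * INIT_WORK_ON_STACK() reaches __init_work(work, 1) above, which uses
 * debug_object_init_on_stack(); skipping the matching
 * destroy_work_on_stack() would trip the debugobjects checks once the
 * stack frame is reused.
 */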

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

static DEFINE_PER_CPU(struct global_cwq, global_cwq);

static int worker_thread(void *__worker);

static int singlethread_cpu __read_mostly;

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	return &per_cpu(global_cwq, cpu);
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
					       struct workqueue_struct *wq)
{
	if (unlikely(wq->flags & WQ_SINGLE_THREAD))
		cpu = singlethread_cpu;
	return get_cwq(cpu, wq);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}
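
/*
 * A sketch of the work->data encoding that set_wq_data() relies on
 * (values are illustrative, not taken from a real system): cwqs are
 * aligned so that the low WORK_STRUCT_FLAG_BITS of their address are
 * zero, which lets a single word carry both the cwq pointer and the
 * flag/color bits.
 *
 *	cwq pointer	0xffff880012345600	(low bits zero)
 *	flags + color	              0x05	(e.g. PENDING plus color)
 *	work->data	0xffff880012345605
 *
 * get_wq_data() recovers the pointer by masking with
 * WORK_STRUCT_WQ_DATA_MASK; get_work_color() recovers the color by
 * shifting by WORK_STRUCT_COLOR_SHIFT and masking.
 */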

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) &
			WORK_STRUCT_WQ_DATA_MASK);
}

/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}
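
/*
 * A worked example of the shift-and-fold hash above, assuming a
 * hypothetical work at address A and sizeof(struct work_struct) == 32,
 * i.e. base_shift == 5:
 *
 *	v  = A >> 5;	divide out the work_struct size granularity
 *	v += v >> 6;	fold higher-order bits into the low ones
 *	v &= 0x3f;	keep BUSY_WORKER_HASH_ORDER bits, 0..63
 *
 * The result selects one of the BUSY_WORKER_HASH_SIZE (64) heads in
 * gcwq->busy_hash.
 */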

/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up_process(cwq->worker->task);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
	struct global_cwq *gcwq = cwq->gcwq;
	struct list_head *worklist;
	unsigned long flags;

	debug_work_activate(work);

	spin_lock_irqsave(&gcwq->lock, flags);
	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &cwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
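
/*
 * A minimal usage sketch for queue_work() (illustrative only; my_func,
 * my_work and my_wq are hypothetical caller-side names):
 *
 *	static void my_func(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_func);
 *
 *	...
 *	queue_work(my_wq, &my_work);	returns 1, PENDING was clear
 *	queue_work(my_wq, &my_work);	returns 0 while still pending
 *
 * The second call returns 0 because test_and_set_bit() on
 * WORK_STRUCT_PENDING_BIT in queue_work_on() sees the bit already set.
 */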

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
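
/*
 * A minimal sketch for the delayed variant (my_dwork, my_func and my_wq
 * are hypothetical caller-side names):
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_func);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	runs ~1s from now
 *
 * With a non-zero @delay nothing is put on a worklist immediately;
 * queue_delayed_work_on() below only arms dwork->timer and the actual
 * __queue_work() happens in delayed_work_timer_fn() on expiry.
 */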

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker->flags &= ~WORKER_IDLE;
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
	}
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @cwq: cwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @cwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
{
	struct global_cwq *gcwq = cwq->gcwq;
	int id = -1;
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->cwq = cwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      gcwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	if (bind)
		kthread_bind(worker->task, gcwq->cpu);

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}
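
/*
 * How the functions below fit together (a sketch of the worker
 * lifecycle as used internally, not a caller-visible API):
 *
 *	worker = create_worker(cwq, true);	may sleep, allocates an ID
 *	spin_lock_irq(&gcwq->lock);
 *	start_worker(worker);			counted, idle, task woken
 *	...
 *	destroy_worker(worker);			releases and regrabs the lock
 *	spin_unlock_irq(&gcwq->lock);
 */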

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->worklist, NULL);
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from the pending queue;
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	/* one down, submit a delayed one */
	if (!list_empty(&cwq->delayed_works) &&
	    cwq->nr_active < cwq->max_active)
		cwq_activate_first_delayed(cwq);

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
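
/*
 * A short scenario for the nr_active/delayed_works accounting shared by
 * __queue_work() and cwq_dec_nr_in_flight() above (numbers illustrative):
 *
 *	max_active == 2; works A, B and C are queued in that order:
 *	  A, B	go to cwq->worklist,		nr_active == 2
 *	  C	goes to cwq->delayed_works	(over the limit)
 *
 * When A completes, cwq_dec_nr_in_flight() drops nr_active to 1 and
 * cwq_activate_first_delayed() moves C onto the worklist.  The
 * per-color nr_in_flight[] counts A, B and C the whole time, so a flush
 * of their color completes only after all three have run.
 */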

/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = worker->cwq;
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	work_func_t f = work->func;
	int work_color;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it; this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	work_color = get_work_color(work);
	list_del_init(&work->entry);

	spin_unlock_irq(&gcwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled
 * works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;
	struct cpu_workqueue_struct *cwq = worker->cwq;

woke_up:
	if (unlikely(!cpumask_equal(&worker->task->cpus_allowed,
				    get_cpu_mask(gcwq->cpu))))
		set_cpus_allowed_ptr(worker->task, get_cpu_mask(gcwq->cpu));

	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		return 0;
	}

	worker_leave_idle(worker);

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work =
			list_first_entry(&cwq->worklist,
					 struct work_struct, entry);

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	}

	/*
	 * gcwq->lock is held and there's no work to process, sleep.
	 * Workers are woken up only while holding gcwq->lock, so
	 * setting the current state before releasing gcwq->lock is
	 * enough to prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
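
/*
 * A compact example of the flush color protocol implemented below
 * (illustrative; assumes the color space does not overflow):
 *
 *	initially:	wq->work_color == 0, wq->flush_color == 0
 *	flusher 1:	waits for color 0, work_color advances to 1,
 *			works queued from now on carry color 1
 *	flusher 2:	arrives meanwhile, waits for color 1,
 *			work_color advances to 2
 *
 * When the last color-0 work finishes, cwq_dec_nr_in_flight() completes
 * wq->first_flusher->done; the cascade in flush_workqueue() then wakes
 * flusher 1, advances wq->flush_color and re-arms the cwqs for color 1
 * on behalf of flusher 2.
 */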

/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.
		 * The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
122273f53c4aSTejun Heo 		 */
122373f53c4aSTejun Heo 		wq->first_flusher = NULL;
122473f53c4aSTejun Heo 	}
122573f53c4aSTejun Heo 
122673f53c4aSTejun Heo out_unlock:
122773f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
12281da177e4SLinus Torvalds }
1229ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
12301da177e4SLinus Torvalds 
1231db700897SOleg Nesterov /**
1232db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
1233db700897SOleg Nesterov  * @work: the work which is to be flushed
1234db700897SOleg Nesterov  *
1235a67da70dSOleg Nesterov  * Returns false if @work has already terminated.
1236a67da70dSOleg Nesterov  *
1237db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
1238db700897SOleg Nesterov  * arranged for the work to not be requeued, otherwise it doesn't make
1239db700897SOleg Nesterov  * sense to use this function.
1240db700897SOleg Nesterov  */
1241db700897SOleg Nesterov int flush_work(struct work_struct *work)
1242db700897SOleg Nesterov {
1243affee4b2STejun Heo 	struct worker *worker = NULL;
1244db700897SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
12458b03ae3cSTejun Heo 	struct global_cwq *gcwq;
1246db700897SOleg Nesterov 	struct wq_barrier barr;
1247db700897SOleg Nesterov 
1248db700897SOleg Nesterov 	might_sleep();
1249db700897SOleg Nesterov 	cwq = get_wq_data(work);
1250db700897SOleg Nesterov 	if (!cwq)
1251db700897SOleg Nesterov 		return 0;
12528b03ae3cSTejun Heo 	gcwq = cwq->gcwq;
1253db700897SOleg Nesterov 
12543295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
12553295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
1256a67da70dSOleg Nesterov 
12578b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1258db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
1259db700897SOleg Nesterov 		/*
1260db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
1261db700897SOleg Nesterov 		 * If it was re-queued under us we are not going to wait.
1262db700897SOleg Nesterov 		 */
1263db700897SOleg Nesterov 		smp_rmb();
1264db700897SOleg Nesterov 		if (unlikely(cwq != get_wq_data(work)))
12654690c4abSTejun Heo 			goto already_gone;
1266db700897SOleg Nesterov 	} else {
1267affee4b2STejun Heo 		if (cwq->worker && cwq->worker->current_work == work)
1268affee4b2STejun Heo 			worker = cwq->worker;
1269affee4b2STejun Heo 		if (!worker)
12704690c4abSTejun Heo 			goto already_gone;
1271db700897SOleg Nesterov 	}
1272db700897SOleg Nesterov 
1273affee4b2STejun Heo 	insert_wq_barrier(cwq, &barr, work, worker);
12748b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1275db700897SOleg Nesterov 	wait_for_completion(&barr.done);
1276dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
1277db700897SOleg Nesterov 	return 1;
12784690c4abSTejun Heo already_gone:
12798b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
12804690c4abSTejun Heo 	return 0;
1281db700897SOleg Nesterov }
1282db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
1283db700897SOleg Nesterov 
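/*
 * Illustrative usage sketch (editor's addition, not part of workqueue.c):
 * flush_work() waits for one specific work item rather than a whole
 * workqueue.  "my_dev", "my_update_fn" and "my_dev_quiesce" are
 * hypothetical names invented for this example.
 */
struct my_dev {
	struct work_struct update_work;
};

static void my_update_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, update_work);

	/* ... update dev in process context ... */
	(void)dev;
}

static void my_dev_quiesce(struct my_dev *dev)
{
	/*
	 * The caller has already stopped requeueing update_work, as the
	 * kernel-doc above requires.  flush_work() returns 1 if it had
	 * to wait and 0 if the work had already terminated.
	 */
	if (!flush_work(&dev->update_work))
		pr_debug("update_work already idle\n");
}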
12846e84d644SOleg Nesterov /*
12851f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
12866e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
12876e84d644SOleg Nesterov  */
12886e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
12896e84d644SOleg Nesterov {
12908b03ae3cSTejun Heo 	struct global_cwq *gcwq;
12916e84d644SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
12921f1f642eSOleg Nesterov 	int ret = -1;
12936e84d644SOleg Nesterov 
129422df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
12951f1f642eSOleg Nesterov 		return 0;
12966e84d644SOleg Nesterov 
12976e84d644SOleg Nesterov 	/*
12986e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
12996e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
13006e84d644SOleg Nesterov 	 */
13016e84d644SOleg Nesterov 
13026e84d644SOleg Nesterov 	cwq = get_wq_data(work);
13036e84d644SOleg Nesterov 	if (!cwq)
13046e84d644SOleg Nesterov 		return ret;
13058b03ae3cSTejun Heo 	gcwq = cwq->gcwq;
13066e84d644SOleg Nesterov 
13078b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
13086e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
13096e84d644SOleg Nesterov 		/*
13106e84d644SOleg Nesterov 		 * This work is queued, but perhaps we locked the wrong cwq.
13116e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
13126e84d644SOleg Nesterov 		 * insert_work()->wmb().
13136e84d644SOleg Nesterov 		 */
13146e84d644SOleg Nesterov 		smp_rmb();
13156e84d644SOleg Nesterov 		if (cwq == get_wq_data(work)) {
1316dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
13176e84d644SOleg Nesterov 			list_del_init(&work->entry);
131873f53c4aSTejun Heo 			cwq_dec_nr_in_flight(cwq, get_work_color(work));
13196e84d644SOleg Nesterov 			ret = 1;
13206e84d644SOleg Nesterov 		}
13216e84d644SOleg Nesterov 	}
13228b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
13236e84d644SOleg Nesterov 
13246e84d644SOleg Nesterov 	return ret;
13256e84d644SOleg Nesterov }
13266e84d644SOleg Nesterov 
13276e84d644SOleg Nesterov static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
1328b89deed3SOleg Nesterov 				struct work_struct *work)
1329b89deed3SOleg Nesterov {
13308b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
1331b89deed3SOleg Nesterov 	struct wq_barrier barr;
1332affee4b2STejun Heo 	struct worker *worker;
1333b89deed3SOleg Nesterov 
13348b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1335affee4b2STejun Heo 
1336affee4b2STejun Heo 	worker = NULL;
1337c34056a3STejun Heo 	if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
1338affee4b2STejun Heo 		worker = cwq->worker;
1339affee4b2STejun Heo 		insert_wq_barrier(cwq, &barr, work, worker);
1340b89deed3SOleg Nesterov 	}
1341affee4b2STejun Heo 
13428b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1343b89deed3SOleg Nesterov 
1344affee4b2STejun Heo 	if (unlikely(worker)) {
1345b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
1346dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
1347dc186ad7SThomas Gleixner 	}
1348b89deed3SOleg Nesterov }
1349b89deed3SOleg Nesterov 
13506e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
1351b89deed3SOleg Nesterov {
1352b89deed3SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
135328e53bddSOleg Nesterov 	struct workqueue_struct *wq;
1354b1f4ec17SOleg Nesterov 	int cpu;
1355b89deed3SOleg Nesterov 
1356f293ea92SOleg Nesterov 	might_sleep();
1357f293ea92SOleg Nesterov 
13583295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
13593295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
13604e6045f1SJohannes Berg 
1361b89deed3SOleg Nesterov 	cwq = get_wq_data(work);
1362b89deed3SOleg Nesterov 	if (!cwq)
13633af24433SOleg Nesterov 		return;
1364b89deed3SOleg Nesterov 
136528e53bddSOleg Nesterov 	wq = cwq->wq;
136628e53bddSOleg Nesterov 
13671537663fSTejun Heo 	for_each_possible_cpu(cpu)
13684690c4abSTejun Heo 		wait_on_cpu_work(get_cwq(cpu, wq), work);
13696e84d644SOleg Nesterov }
13706e84d644SOleg Nesterov 
13711f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
13721f1f642eSOleg Nesterov 				struct timer_list* timer)
13731f1f642eSOleg Nesterov {
13741f1f642eSOleg Nesterov 	int ret;
13751f1f642eSOleg Nesterov 
13761f1f642eSOleg Nesterov 	do {
13771f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
13781f1f642eSOleg Nesterov 		if (!ret)
13791f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
13801f1f642eSOleg Nesterov 		wait_on_work(work);
13811f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
13821f1f642eSOleg Nesterov 
13834d707b9fSOleg Nesterov 	clear_wq_data(work);
13841f1f642eSOleg Nesterov 	return ret;
13851f1f642eSOleg Nesterov }
13861f1f642eSOleg Nesterov 
13876e84d644SOleg Nesterov /**
13886e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
13896e84d644SOleg Nesterov  * @work: the work which is to be flushed
13906e84d644SOleg Nesterov  *
13911f1f642eSOleg Nesterov  * Returns true if @work was pending.
13921f1f642eSOleg Nesterov  *
13936e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
13946e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
13956e84d644SOleg Nesterov  * has completed.
13966e84d644SOleg Nesterov  *
13976e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
13986e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue, however in that
13996e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
14006e84d644SOleg Nesterov  * workqueue.
14016e84d644SOleg Nesterov  *
14026e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
14036e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
14046e84d644SOleg Nesterov  *
14056e84d644SOleg Nesterov  * The caller must ensure that the workqueue_struct on which this work was last
14066e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
14076e84d644SOleg Nesterov  */
14081f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
14096e84d644SOleg Nesterov {
14101f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
1411b89deed3SOleg Nesterov }
141228e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
1413b89deed3SOleg Nesterov 
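/*
 * Illustrative usage sketch (editor's addition, not part of workqueue.c):
 * teardown via cancel_work_sync(), reusing the hypothetical "my_dev"
 * from the sketch above.  Afterwards my_update_fn() is neither queued
 * nor running, so the device can be freed safely.
 */
static void my_dev_teardown(struct my_dev *dev)
{
	if (cancel_work_sync(&dev->update_work))
		pr_debug("update_work was still pending\n");
}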
14146e84d644SOleg Nesterov /**
1415f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
14166e84d644SOleg Nesterov  * @dwork: the delayed work struct
14176e84d644SOleg Nesterov  *
14181f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
14191f1f642eSOleg Nesterov  *
14206e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
14216e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
14226e84d644SOleg Nesterov  */
14231f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
14246e84d644SOleg Nesterov {
14251f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
14266e84d644SOleg Nesterov }
1427f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
14281da177e4SLinus Torvalds 
14296e84d644SOleg Nesterov static struct workqueue_struct *keventd_wq __read_mostly;
14301da177e4SLinus Torvalds 
14310fcb78c2SRolf Eike Beer /**
14320fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
14330fcb78c2SRolf Eike Beer  * @work: job to be done
14340fcb78c2SRolf Eike Beer  *
14355b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
14365b0f437dSBart Van Assche  * non-zero otherwise.
14375b0f437dSBart Van Assche  *
14385b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
14395b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
14405b0f437dSBart Van Assche  * workqueue otherwise.
14410fcb78c2SRolf Eike Beer  */
14427ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
14431da177e4SLinus Torvalds {
14441da177e4SLinus Torvalds 	return queue_work(keventd_wq, work);
14451da177e4SLinus Torvalds }
1446ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
14471da177e4SLinus Torvalds 
1448c1a220e7SZhang Rui /**
1449c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
1450c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
1451c1a220e7SZhang Rui  * @work: job to be done
1452c1a220e7SZhang Rui  *
1453c1a220e7SZhang Rui  * This puts a job on a specific cpu.
1454c1a220e7SZhang Rui  */
1455c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
1456c1a220e7SZhang Rui {
1457c1a220e7SZhang Rui 	return queue_work_on(cpu, keventd_wq, work);
1458c1a220e7SZhang Rui }
1459c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
1460c1a220e7SZhang Rui 
14610fcb78c2SRolf Eike Beer /**
14620fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
146352bad64dSDavid Howells  * @dwork: job to be done
146352bad64dSDavid Howells wait
14650fcb78c2SRolf Eike Beer  *
14660fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
14670fcb78c2SRolf Eike Beer  * workqueue.
14680fcb78c2SRolf Eike Beer  */
14697ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
147082f67cd9SIngo Molnar 					unsigned long delay)
14711da177e4SLinus Torvalds {
147252bad64dSDavid Howells 	return queue_delayed_work(keventd_wq, dwork, delay);
14731da177e4SLinus Torvalds }
1474ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
14751da177e4SLinus Torvalds 
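/*
 * Illustrative usage sketch (editor's addition, not part of workqueue.c):
 * queueing onto the kernel-global workqueue backed by keventd_wq above.
 * The handler, the DECLARE_* instances and the one-second delay are all
 * hypothetical.
 */
static void my_event_fn(struct work_struct *work)
{
	/* runs in process context on the "events" workqueue */
}

static DECLARE_WORK(my_event_work, my_event_fn);
static DECLARE_DELAYED_WORK(my_late_work, my_event_fn);

static void my_kick_events(void)
{
	schedule_work(&my_event_work);			/* as soon as possible */
	schedule_delayed_work(&my_late_work, HZ);	/* after roughly 1s */
}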
14760fcb78c2SRolf Eike Beer /**
14778c53e463SLinus Torvalds  * flush_delayed_work - block until a delayed_work's callback has terminated
14788c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
14798c53e463SLinus Torvalds  *
14808c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
14818c53e463SLinus Torvalds  */
14828c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
14838c53e463SLinus Torvalds {
14848c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
14854690c4abSTejun Heo 		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
14864690c4abSTejun Heo 			     &dwork->work);
14878c53e463SLinus Torvalds 		put_cpu();
14888c53e463SLinus Torvalds 	}
14898c53e463SLinus Torvalds 	flush_work(&dwork->work);
14908c53e463SLinus Torvalds }
14918c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
14928c53e463SLinus Torvalds 
14938c53e463SLinus Torvalds /**
14940fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
14950fcb78c2SRolf Eike Beer  * @cpu: cpu to use
149652bad64dSDavid Howells  * @dwork: job to be done
14970fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
14980fcb78c2SRolf Eike Beer  *
14990fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
15000fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
15010fcb78c2SRolf Eike Beer  */
15021da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
150352bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
15041da177e4SLinus Torvalds {
150552bad64dSDavid Howells 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
15061da177e4SLinus Torvalds }
1507ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
15081da177e4SLinus Torvalds 
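/*
 * Illustrative usage sketch (editor's addition, not part of workqueue.c):
 * forcing a pending delayed work to run now instead of waiting out its
 * timer, e.g. right before suspend.  Reuses the hypothetical
 * "my_late_work" declared above.
 */
static void my_sync_before_suspend(void)
{
	/* fires the timer early if it is still pending, then waits */
	flush_delayed_work(&my_late_work);
}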
1509b6136773SAndrew Morton /**
1510b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
1511b6136773SAndrew Morton  * @func: the function to call
1512b6136773SAndrew Morton  *
1513b6136773SAndrew Morton  * Returns zero on success.
1514b6136773SAndrew Morton  * Returns a negative errno on failure.
1515b6136773SAndrew Morton  *
1516b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
1517b6136773SAndrew Morton  */
151865f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
151915316ba8SChristoph Lameter {
152015316ba8SChristoph Lameter 	int cpu;
152165a64464SAndi Kleen 	int orig = -1;
1522b6136773SAndrew Morton 	struct work_struct *works;
152315316ba8SChristoph Lameter 
1524b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
1525b6136773SAndrew Morton 	if (!works)
152615316ba8SChristoph Lameter 		return -ENOMEM;
1527b6136773SAndrew Morton 
152895402b38SGautham R Shenoy 	get_online_cpus();
152993981800STejun Heo 
153093981800STejun Heo 	/*
153193981800STejun Heo 	 * When running in keventd don't schedule a work item on
153293981800STejun Heo 	 * itself. Can just call directly because the work queue is
153393981800STejun Heo 	 * already bound. This also is faster.
153493981800STejun Heo 	 */
153593981800STejun Heo 	if (current_is_keventd())
153693981800STejun Heo 		orig = raw_smp_processor_id();
153793981800STejun Heo 
153815316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
15399bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
15409bfb1839SIngo Molnar 
15419bfb1839SIngo Molnar 		INIT_WORK(work, func);
154293981800STejun Heo 		if (cpu != orig)
15438de6d308SOleg Nesterov 			schedule_work_on(cpu, work);
154415316ba8SChristoph Lameter 	}
154593981800STejun Heo 	if (orig >= 0)
154693981800STejun Heo 		func(per_cpu_ptr(works, orig));
154793981800STejun Heo 
154893981800STejun Heo 	for_each_online_cpu(cpu)
15498616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
155093981800STejun Heo 
155195402b38SGautham R Shenoy 	put_online_cpus();
1552b6136773SAndrew Morton 	free_percpu(works);
155315316ba8SChristoph Lameter 	return 0;
155415316ba8SChristoph Lameter }
155515316ba8SChristoph Lameter 
1556eef6a7d5SAlan Stern /**
1557eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
1558eef6a7d5SAlan Stern  *
1559eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
1560eef6a7d5SAlan Stern  * completion.
1561eef6a7d5SAlan Stern  *
1562eef6a7d5SAlan Stern  * Think twice before calling this function! It's very easy to get into
1563eef6a7d5SAlan Stern  * trouble if you don't take great care. Either of the following situations
1564eef6a7d5SAlan Stern  * will lead to deadlock:
1565eef6a7d5SAlan Stern  *
1566eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
1567eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
1568eef6a7d5SAlan Stern  *
1569eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
1570eef6a7d5SAlan Stern  *
1571eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
1572eef6a7d5SAlan Stern  * occur very often. It depends on what work items are on the workqueue and
1573eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
1574eef6a7d5SAlan Stern  *
1575eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
1576eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
1577eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
1578eef6a7d5SAlan Stern  * cancel_work_sync() instead.
1579eef6a7d5SAlan Stern  */
15801da177e4SLinus Torvalds void flush_scheduled_work(void)
15811da177e4SLinus Torvalds {
15821da177e4SLinus Torvalds 	flush_workqueue(keventd_wq);
15831da177e4SLinus Torvalds }
1584ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
15851da177e4SLinus Torvalds 
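/*
 * Illustrative usage sketch (editor's addition, not part of workqueue.c):
 * schedule_on_each_cpu() runs a handler once on every online CPU and
 * returns only after all instances have finished, which suits jobs such
 * as draining per-cpu caches.  "my_drain_fn" is hypothetical.
 */
static void my_drain_fn(struct work_struct *unused)
{
	/* runs bound to one CPU; drain that CPU's private state here */
}

static int my_drain_all_cpus(void)
{
	return schedule_on_each_cpu(my_drain_fn);	/* 0 or -ENOMEM */
}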
15861da177e4SLinus Torvalds /**
15871fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
15881fa44ecaSJames Bottomley  * @fn:	the function to execute
15891fa44ecaSJames Bottomley  * @ew:	guaranteed storage for the execute work structure (must
15901fa44ecaSJames Bottomley  *	be available when the work executes)
15911fa44ecaSJames Bottomley  *
15921fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
15931fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
15941fa44ecaSJames Bottomley  *
15951fa44ecaSJames Bottomley  * Returns:	0 - function was executed
15961fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
15971fa44ecaSJames Bottomley  */
159865f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
15991fa44ecaSJames Bottomley {
16001fa44ecaSJames Bottomley 	if (!in_interrupt()) {
160165f27f38SDavid Howells 		fn(&ew->work);
16021fa44ecaSJames Bottomley 		return 0;
16031fa44ecaSJames Bottomley 	}
16041fa44ecaSJames Bottomley 
160565f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
16061fa44ecaSJames Bottomley 	schedule_work(&ew->work);
16071fa44ecaSJames Bottomley 
16081fa44ecaSJames Bottomley 	return 1;
16091fa44ecaSJames Bottomley }
16101fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
16111fa44ecaSJames Bottomley 
16121da177e4SLinus Torvalds int keventd_up(void)
16131da177e4SLinus Torvalds {
16141da177e4SLinus Torvalds 	return keventd_wq != NULL;
16151da177e4SLinus Torvalds }
16161da177e4SLinus Torvalds 
16171da177e4SLinus Torvalds int current_is_keventd(void)
16181da177e4SLinus Torvalds {
16191da177e4SLinus Torvalds 	struct cpu_workqueue_struct *cwq;
1620d243769dSHugh Dickins 	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
16211da177e4SLinus Torvalds 	int ret = 0;
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
16241da177e4SLinus Torvalds 
16251537663fSTejun Heo 	cwq = get_cwq(cpu, keventd_wq);
1626c34056a3STejun Heo 	if (current == cwq->worker->task)
16271da177e4SLinus Torvalds 		ret = 1;
16281da177e4SLinus Torvalds 
16291da177e4SLinus Torvalds 	return ret;
16301da177e4SLinus Torvalds 
16311da177e4SLinus Torvalds }
16321da177e4SLinus Torvalds 
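/*
 * Illustrative usage sketch (editor's addition, not part of workqueue.c):
 * a release path that may be entered from either process or interrupt
 * context can defer itself with execute_in_process_context().  "my_obj"
 * and its helpers are hypothetical.
 */
struct my_obj {
	struct execute_work ew;
	/* ... payload ... */
};

static void my_obj_release(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, ew.work);

	kfree(obj);		/* must run in process context */
}

static void my_obj_put(struct my_obj *obj)
{
	/* runs my_obj_release() now, or schedules it when in_interrupt() */
	execute_in_process_context(my_obj_release, &obj->ew);
}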
16330f900049STejun Heo static struct cpu_workqueue_struct *alloc_cwqs(void)
16340f900049STejun Heo {
16350f900049STejun Heo 	/*
16360f900049STejun Heo 	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
16370f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
16380f900049STejun Heo 	 * unsigned long long.
16390f900049STejun Heo 	 */
16400f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
16410f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
16420f900049STejun Heo 				   __alignof__(unsigned long long));
16430f900049STejun Heo 	struct cpu_workqueue_struct *cwqs;
16440f900049STejun Heo #ifndef CONFIG_SMP
16450f900049STejun Heo 	void *ptr;
16460f900049STejun Heo 
16470f900049STejun Heo 	/*
16480f900049STejun Heo 	 * On UP, percpu allocator doesn't honor alignment parameter
16490f900049STejun Heo 	 * and simply uses arch-dependent default. Allocate enough
16500f900049STejun Heo 	 * room to align cwq and put an extra pointer at the end
16510f900049STejun Heo 	 * pointing back to the originally allocated pointer which
16520f900049STejun Heo 	 * will be used for free.
16530f900049STejun Heo 	 *
16540f900049STejun Heo 	 * FIXME: This really belongs to UP percpu code. Update UP
16550f900049STejun Heo 	 * percpu code to honor alignment and remove this ugliness.
16560f900049STejun Heo 	 */
16570f900049STejun Heo 	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
16580f900049STejun Heo 	cwqs = PTR_ALIGN(ptr, align);
16590f900049STejun Heo 	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
16600f900049STejun Heo #else
16610f900049STejun Heo 	/* On SMP, percpu allocator can do it itself */
16620f900049STejun Heo 	cwqs = __alloc_percpu(size, align);
16630f900049STejun Heo #endif
16640f900049STejun Heo 	/* just in case, make sure it's actually aligned */
16650f900049STejun Heo 	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
16660f900049STejun Heo 	return cwqs;
16670f900049STejun Heo }
16680f900049STejun Heo 
16690f900049STejun Heo static void free_cwqs(struct cpu_workqueue_struct *cwqs)
16700f900049STejun Heo {
16710f900049STejun Heo #ifndef CONFIG_SMP
16720f900049STejun Heo 	/* on UP, the pointer to free is stored right after the cwq */
16730f900049STejun Heo 	if (cwqs)
16740f900049STejun Heo 		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
16750f900049STejun Heo #else
16760f900049STejun Heo 	free_percpu(cwqs);
16770f900049STejun Heo #endif
16780f900049STejun Heo }
16790f900049STejun Heo 
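/*
 * Editor's illustration (not part of workqueue.c) of why alloc_cwqs()
 * insists on that alignment: work_struct->data packs a cwq pointer and
 * WORK_STRUCT_FLAG_BITS of flags into one word, so the low bits of every
 * cwq address must be zero.  The pack/unpack helpers below are a
 * hypothetical sketch of that arithmetic, not functions used by this
 * file.
 */
static inline unsigned long my_pack_cwq(struct cpu_workqueue_struct *cwq,
					unsigned long flags)
{
	/* cwq is (1 << WORK_STRUCT_FLAG_BITS)-aligned, low bits are free */
	return (unsigned long)cwq | (flags & WORK_STRUCT_FLAG_MASK);
}

static inline struct cpu_workqueue_struct *my_unpack_cwq(unsigned long data)
{
	return (struct cpu_workqueue_struct *)(data & ~WORK_STRUCT_FLAG_MASK);
}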
16804e6045f1SJohannes Berg struct workqueue_struct *__create_workqueue_key(const char *name,
168197e37d7bSTejun Heo 						unsigned int flags,
16821e19ffc6STejun Heo 						int max_active,
1683eb13ba87SJohannes Berg 						struct lock_class_key *key,
1684eb13ba87SJohannes Berg 						const char *lock_name)
16853af24433SOleg Nesterov {
16861537663fSTejun Heo 	bool singlethread = flags & WQ_SINGLE_THREAD;
16873af24433SOleg Nesterov 	struct workqueue_struct *wq;
1688c34056a3STejun Heo 	bool failed = false;
1689c34056a3STejun Heo 	unsigned int cpu;
16903af24433SOleg Nesterov 
16911e19ffc6STejun Heo 	max_active = clamp_val(max_active, 1, INT_MAX);
16921e19ffc6STejun Heo 
16933af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
16943af24433SOleg Nesterov 	if (!wq)
16954690c4abSTejun Heo 		goto err;
16963af24433SOleg Nesterov 
16970f900049STejun Heo 	wq->cpu_wq = alloc_cwqs();
16984690c4abSTejun Heo 	if (!wq->cpu_wq)
16994690c4abSTejun Heo 		goto err;
17003af24433SOleg Nesterov 
170197e37d7bSTejun Heo 	wq->flags = flags;
1702a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
170373f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
170473f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
170573f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
170673f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
17073af24433SOleg Nesterov 	wq->name = name;
1708eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1709cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
17103af24433SOleg Nesterov 
17113da1c84cSOleg Nesterov 	cpu_maps_update_begin();
17126af8bf3dSOleg Nesterov 	/*
17136af8bf3dSOleg Nesterov 	 * We must initialize cwqs for each possible cpu even if we
17146af8bf3dSOleg Nesterov 	 * are going to call destroy_workqueue() finally. Otherwise
17156af8bf3dSOleg Nesterov 	 * cpu_up() can hit the uninitialized cwq once we drop the
17166af8bf3dSOleg Nesterov 	 * lock.
17176af8bf3dSOleg Nesterov 	 */
17183af24433SOleg Nesterov 	for_each_possible_cpu(cpu) {
17191537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
17208b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
17211537663fSTejun Heo 
17220f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
17238b03ae3cSTejun Heo 		cwq->gcwq = gcwq;
1724c34056a3STejun Heo 		cwq->wq = wq;
172573f53c4aSTejun Heo 		cwq->flush_color = -1;
17261e19ffc6STejun Heo 		cwq->max_active = max_active;
17271537663fSTejun Heo 		INIT_LIST_HEAD(&cwq->worklist);
17281e19ffc6STejun Heo 		INIT_LIST_HEAD(&cwq->delayed_works);
17291537663fSTejun Heo 
1730c34056a3STejun Heo 		if (failed)
17313af24433SOleg Nesterov 			continue;
1732c34056a3STejun Heo 		cwq->worker = create_worker(cwq,
1733c34056a3STejun Heo 					    cpu_online(cpu) && !singlethread);
1734c34056a3STejun Heo 		if (cwq->worker)
1735c34056a3STejun Heo 			start_worker(cwq->worker);
17361537663fSTejun Heo 		else
1737c34056a3STejun Heo 			failed = true;
17383af24433SOleg Nesterov 	}
17391537663fSTejun Heo 
1740a0a1a5fdSTejun Heo 	/*
1741a0a1a5fdSTejun Heo 	 * workqueue_lock protects global freeze state and workqueues
1742a0a1a5fdSTejun Heo 	 * list. Grab it, set max_active accordingly and add the new
1743a0a1a5fdSTejun Heo 	 * workqueue to workqueues list.
1744a0a1a5fdSTejun Heo 	 */
17451537663fSTejun Heo 	spin_lock(&workqueue_lock);
1746a0a1a5fdSTejun Heo 
1747a0a1a5fdSTejun Heo 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
1748a0a1a5fdSTejun Heo 		for_each_possible_cpu(cpu)
1749a0a1a5fdSTejun Heo 			get_cwq(cpu, wq)->max_active = 0;
1750a0a1a5fdSTejun Heo 
17511537663fSTejun Heo 	list_add(&wq->list, &workqueues);
1752a0a1a5fdSTejun Heo 
17531537663fSTejun Heo 	spin_unlock(&workqueue_lock);
17541537663fSTejun Heo 
17553da1c84cSOleg Nesterov 	cpu_maps_update_done();
17563af24433SOleg Nesterov 
1757c34056a3STejun Heo 	if (failed) {
17583af24433SOleg Nesterov 		destroy_workqueue(wq);
17593af24433SOleg Nesterov 		wq = NULL;
17603af24433SOleg Nesterov 	}
17613af24433SOleg Nesterov 	return wq;
17624690c4abSTejun Heo err:
17634690c4abSTejun Heo 	if (wq) {
17640f900049STejun Heo 		free_cwqs(wq->cpu_wq);
17654690c4abSTejun Heo 		kfree(wq);
17664690c4abSTejun Heo 	}
17674690c4abSTejun Heo 	return NULL;
17683af24433SOleg Nesterov }
17694e6045f1SJohannes Berg EXPORT_SYMBOL_GPL(__create_workqueue_key);
17703af24433SOleg Nesterov 
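/*
 * Illustrative usage sketch (editor's addition, not part of workqueue.c):
 * __create_workqueue_key() is normally reached through wrappers such as
 * create_workqueue() and create_singlethread_workqueue().  The driver
 * names below are hypothetical.
 */
static struct workqueue_struct *my_wq;

static int my_driver_init(void)
{
	my_wq = create_singlethread_workqueue("my_driver");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_driver_exit(void)
{
	destroy_workqueue(my_wq);	/* flushes all pending work first */
}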
17713af24433SOleg Nesterov /**
17723af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
17733af24433SOleg Nesterov  * @wq: target workqueue
17743af24433SOleg Nesterov  *
17753af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
17763af24433SOleg Nesterov  */
17773af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
17783af24433SOleg Nesterov {
1779c8e55f36STejun Heo 	unsigned int cpu;
17803af24433SOleg Nesterov 
1781a0a1a5fdSTejun Heo 	flush_workqueue(wq);
1782a0a1a5fdSTejun Heo 
1783a0a1a5fdSTejun Heo 	/*
1784a0a1a5fdSTejun Heo 	 * wq list is used to freeze wq, remove from list after
1785a0a1a5fdSTejun Heo 	 * flushing is complete in case freeze races us.
1786a0a1a5fdSTejun Heo 	 */
17873da1c84cSOleg Nesterov 	cpu_maps_update_begin();
178895402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
17893af24433SOleg Nesterov 	list_del(&wq->list);
179095402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
17913da1c84cSOleg Nesterov 	cpu_maps_update_done();
17923af24433SOleg Nesterov 
179373f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
179473f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
179573f53c4aSTejun Heo 		int i;
179673f53c4aSTejun Heo 
1797c34056a3STejun Heo 		if (cwq->worker) {
1798c8e55f36STejun Heo 			spin_lock_irq(&cwq->gcwq->lock);
1799c34056a3STejun Heo 			destroy_worker(cwq->worker);
1800c34056a3STejun Heo 			cwq->worker = NULL;
1801c8e55f36STejun Heo 			spin_unlock_irq(&cwq->gcwq->lock);
180273f53c4aSTejun Heo 		}
180373f53c4aSTejun Heo 
180473f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
180573f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
18061e19ffc6STejun Heo 		BUG_ON(cwq->nr_active);
18071e19ffc6STejun Heo 		BUG_ON(!list_empty(&cwq->delayed_works));
180873f53c4aSTejun Heo 	}
18091537663fSTejun Heo 
18100f900049STejun Heo 	free_cwqs(wq->cpu_wq);
18113af24433SOleg Nesterov 	kfree(wq);
18123af24433SOleg Nesterov }
18133af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
18143af24433SOleg Nesterov 
18159c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
18161da177e4SLinus Torvalds 						unsigned long action,
18171da177e4SLinus Torvalds 						void *hcpu)
18181da177e4SLinus Torvalds {
18193af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
18203af24433SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
18211da177e4SLinus Torvalds 	struct workqueue_struct *wq;
18221da177e4SLinus Torvalds 
18238bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
18248bb78442SRafael J. Wysocki 
18251da177e4SLinus Torvalds 	list_for_each_entry(wq, &workqueues, list) {
18261537663fSTejun Heo 		if (wq->flags & WQ_SINGLE_THREAD)
18271537663fSTejun Heo 			continue;
18281537663fSTejun Heo 
18291537663fSTejun Heo 		cwq = get_cwq(cpu, wq);
18303af24433SOleg Nesterov 
18313af24433SOleg Nesterov 		switch (action) {
18323da1c84cSOleg Nesterov 		case CPU_POST_DEAD:
183373f53c4aSTejun Heo 			flush_workqueue(wq);
18341da177e4SLinus Torvalds 			break;
18351da177e4SLinus Torvalds 		}
18363af24433SOleg Nesterov 	}
18371da177e4SLinus Torvalds 
18381537663fSTejun Heo 	return notifier_from_errno(0);
18391da177e4SLinus Torvalds }
18401da177e4SLinus Torvalds 
18412d3854a3SRusty Russell #ifdef CONFIG_SMP
18428ccad40dSRusty Russell 
18432d3854a3SRusty Russell struct work_for_cpu {
18446b44003eSAndrew Morton 	struct completion completion;
18452d3854a3SRusty Russell 	long (*fn)(void *);
18462d3854a3SRusty Russell 	void *arg;
18472d3854a3SRusty Russell 	long ret;
18482d3854a3SRusty Russell };
18492d3854a3SRusty Russell 
18506b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
18512d3854a3SRusty Russell {
18526b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
18532d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
18546b44003eSAndrew Morton 	complete(&wfc->completion);
18556b44003eSAndrew Morton 	return 0;
18562d3854a3SRusty Russell }
18572d3854a3SRusty Russell 
18582d3854a3SRusty Russell /**
18592d3854a3SRusty Russell  * work_on_cpu - run a function in user context on a particular cpu
18602d3854a3SRusty Russell  * @cpu: the cpu to run on
18612d3854a3SRusty Russell  * @fn: the function to run
18622d3854a3SRusty Russell  * @arg: the function arg
18632d3854a3SRusty Russell  *
186431ad9081SRusty Russell  * This will return the value @fn returns.
186531ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
18666b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
18672d3854a3SRusty Russell  */
18682d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
18692d3854a3SRusty Russell {
18706b44003eSAndrew Morton 	struct task_struct *sub_thread;
18716b44003eSAndrew Morton 	struct work_for_cpu wfc = {
18726b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
18736b44003eSAndrew Morton 		.fn = fn,
18746b44003eSAndrew Morton 		.arg = arg,
18756b44003eSAndrew Morton 	};
18762d3854a3SRusty Russell 
18776b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
18786b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
18796b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
18806b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
18816b44003eSAndrew Morton 	wake_up_process(sub_thread);
18826b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
18832d3854a3SRusty Russell 	return wfc.ret;
18842d3854a3SRusty Russell }
18852d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
18862d3854a3SRusty Russell #endif /* CONFIG_SMP */
18872d3854a3SRusty Russell 
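/*
 * Illustrative usage sketch (editor's addition, not part of workqueue.c):
 * work_on_cpu() runs @fn synchronously on the chosen CPU via a bound
 * kthread.  Keeping that CPU online around the call is the caller's
 * job; "my_query_cpu" and its callback are hypothetical.
 */
static long my_on_cpu_fn(void *arg)
{
	/* executes on the CPU that was handed to work_on_cpu() */
	return (long)raw_smp_processor_id();
}

static long my_query_cpu(unsigned int cpu)
{
	long ret = -ENODEV;

	get_online_cpus();
	if (cpu_online(cpu))
		ret = work_on_cpu(cpu, my_on_cpu_fn, NULL);
	put_online_cpus();
	return ret;
}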
1888a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER
1889a0a1a5fdSTejun Heo 
1890a0a1a5fdSTejun Heo /**
1891a0a1a5fdSTejun Heo  * freeze_workqueues_begin - begin freezing workqueues
1892a0a1a5fdSTejun Heo  *
1893a0a1a5fdSTejun Heo  * Start freezing workqueues. After this function returns, all
1894a0a1a5fdSTejun Heo  * freezeable workqueues will queue new works to their delayed_works
1895a0a1a5fdSTejun Heo  * list instead of the cwq ones.
1896a0a1a5fdSTejun Heo  *
1897a0a1a5fdSTejun Heo  * CONTEXT:
18988b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
1899a0a1a5fdSTejun Heo  */
1900a0a1a5fdSTejun Heo void freeze_workqueues_begin(void)
1901a0a1a5fdSTejun Heo {
1902a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
1903a0a1a5fdSTejun Heo 	unsigned int cpu;
1904a0a1a5fdSTejun Heo 
1905a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
1906a0a1a5fdSTejun Heo 
1907a0a1a5fdSTejun Heo 	BUG_ON(workqueue_freezing);
1908a0a1a5fdSTejun Heo 	workqueue_freezing = true;
1909a0a1a5fdSTejun Heo 
1910a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
19118b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
19128b03ae3cSTejun Heo 
19138b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
19148b03ae3cSTejun Heo 
1915a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
1916a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1917a0a1a5fdSTejun Heo 
1918a0a1a5fdSTejun Heo 			if (wq->flags & WQ_FREEZEABLE)
1919a0a1a5fdSTejun Heo 				cwq->max_active = 0;
1920a0a1a5fdSTejun Heo 		}
19218b03ae3cSTejun Heo 
19228b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1923a0a1a5fdSTejun Heo 	}
1924a0a1a5fdSTejun Heo 
1925a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
1926a0a1a5fdSTejun Heo }
1927a0a1a5fdSTejun Heo 
1928a0a1a5fdSTejun Heo /**
1929a0a1a5fdSTejun Heo  * freeze_workqueues_busy - are freezeable workqueues still busy?
1930a0a1a5fdSTejun Heo  *
1931a0a1a5fdSTejun Heo  * Check whether freezing is complete. This function must be called
1932a0a1a5fdSTejun Heo  * between freeze_workqueues_begin() and thaw_workqueues().
1933a0a1a5fdSTejun Heo  *
1934a0a1a5fdSTejun Heo  * CONTEXT:
1935a0a1a5fdSTejun Heo  * Grabs and releases workqueue_lock.
1936a0a1a5fdSTejun Heo  *
1937a0a1a5fdSTejun Heo  * RETURNS:
1938a0a1a5fdSTejun Heo  * %true if some freezeable workqueues are still busy. %false if
1939a0a1a5fdSTejun Heo  * freezing is complete.
1940a0a1a5fdSTejun Heo  */
1941a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void)
1942a0a1a5fdSTejun Heo {
1943a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
1944a0a1a5fdSTejun Heo 	unsigned int cpu;
1945a0a1a5fdSTejun Heo 	bool busy = false;
1946a0a1a5fdSTejun Heo 
1947a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
1948a0a1a5fdSTejun Heo 
1949a0a1a5fdSTejun Heo 	BUG_ON(!workqueue_freezing);
1950a0a1a5fdSTejun Heo 
1951a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
1952a0a1a5fdSTejun Heo 		/*
1953a0a1a5fdSTejun Heo 		 * nr_active is monotonically decreasing. It's safe
1954a0a1a5fdSTejun Heo 		 * to peek without lock.
1955a0a1a5fdSTejun Heo 		 */
1956a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
1957a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1958a0a1a5fdSTejun Heo 
1959a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
1960a0a1a5fdSTejun Heo 				continue;
1961a0a1a5fdSTejun Heo 
1962a0a1a5fdSTejun Heo 			BUG_ON(cwq->nr_active < 0);
1963a0a1a5fdSTejun Heo 			if (cwq->nr_active) {
1964a0a1a5fdSTejun Heo 				busy = true;
1965a0a1a5fdSTejun Heo 				goto out_unlock;
1966a0a1a5fdSTejun Heo 			}
1967a0a1a5fdSTejun Heo 		}
1968a0a1a5fdSTejun Heo 	}
1969a0a1a5fdSTejun Heo out_unlock:
1970a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
1971a0a1a5fdSTejun Heo 	return busy;
1972a0a1a5fdSTejun Heo }
1973a0a1a5fdSTejun Heo 
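/*
 * Illustrative sketch (editor's addition, not part of workqueue.c) of
 * the order in which a suspend path might drive the freezer entry
 * points above.  The real caller is the PM core; the polling interval
 * here is an arbitrary assumption.
 */
static void my_freeze_then_thaw(void)
{
	freeze_workqueues_begin();

	/* wait until in-flight works on freezeable workqueues drain */
	while (freeze_workqueues_busy())
		schedule_timeout_uninterruptible(HZ / 10);

	/* ... the system image would be written here ... */

	thaw_workqueues();
}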
1974a0a1a5fdSTejun Heo /**
1975a0a1a5fdSTejun Heo  * thaw_workqueues - thaw workqueues
1976a0a1a5fdSTejun Heo  *
1977a0a1a5fdSTejun Heo  * Thaw workqueues. Normal queueing is restored and all collected
1978a0a1a5fdSTejun Heo  * frozen works are transferred to their respective cwq worklists.
1979a0a1a5fdSTejun Heo  *
1980a0a1a5fdSTejun Heo  * CONTEXT:
19818b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
1982a0a1a5fdSTejun Heo  */
1983a0a1a5fdSTejun Heo void thaw_workqueues(void)
1984a0a1a5fdSTejun Heo {
1985a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
1986a0a1a5fdSTejun Heo 	unsigned int cpu;
1987a0a1a5fdSTejun Heo 
1988a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
1989a0a1a5fdSTejun Heo 
1990a0a1a5fdSTejun Heo 	if (!workqueue_freezing)
1991a0a1a5fdSTejun Heo 		goto out_unlock;
1992a0a1a5fdSTejun Heo 
1993a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
19948b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
19958b03ae3cSTejun Heo 
19968b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
19978b03ae3cSTejun Heo 
1998a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
1999a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2000a0a1a5fdSTejun Heo 
2001a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
2002a0a1a5fdSTejun Heo 				continue;
2003a0a1a5fdSTejun Heo 
2004a0a1a5fdSTejun Heo 			/* restore max_active and repopulate worklist */
2005a0a1a5fdSTejun Heo 			cwq->max_active = wq->saved_max_active;
2006a0a1a5fdSTejun Heo 
2007a0a1a5fdSTejun Heo 			while (!list_empty(&cwq->delayed_works) &&
2008a0a1a5fdSTejun Heo 			       cwq->nr_active < cwq->max_active)
2009a0a1a5fdSTejun Heo 				cwq_activate_first_delayed(cwq);
2010a0a1a5fdSTejun Heo 
2011c8e55f36STejun Heo 			wake_up_process(cwq->worker->task);
2012a0a1a5fdSTejun Heo 		}
20138b03ae3cSTejun Heo 
20148b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
2015a0a1a5fdSTejun Heo 	}
2016a0a1a5fdSTejun Heo 
2017a0a1a5fdSTejun Heo 	workqueue_freezing = false;
2018a0a1a5fdSTejun Heo out_unlock:
2019a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
2020a0a1a5fdSTejun Heo }
2021a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */
2022a0a1a5fdSTejun Heo 
2023c12920d1SOleg Nesterov void __init init_workqueues(void)
20241da177e4SLinus Torvalds {
2025c34056a3STejun Heo 	unsigned int cpu;
2026c8e55f36STejun Heo 	int i;
2027c34056a3STejun Heo 
2028e7577c50SRusty Russell 	singlethread_cpu = cpumask_first(cpu_possible_mask);
20291da177e4SLinus Torvalds 	hotcpu_notifier(workqueue_cpu_callback, 0);
20308b03ae3cSTejun Heo 
20318b03ae3cSTejun Heo 	/* initialize gcwqs */
20328b03ae3cSTejun Heo 	for_each_possible_cpu(cpu) {
20338b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
20348b03ae3cSTejun Heo 
20358b03ae3cSTejun Heo 		spin_lock_init(&gcwq->lock);
20368b03ae3cSTejun Heo 		gcwq->cpu = cpu;
20378b03ae3cSTejun Heo 
2038c8e55f36STejun Heo 		INIT_LIST_HEAD(&gcwq->idle_list);
2039c8e55f36STejun Heo 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
2040c8e55f36STejun Heo 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
2041c8e55f36STejun Heo 
20428b03ae3cSTejun Heo 		ida_init(&gcwq->worker_ida);
20438b03ae3cSTejun Heo 	}
20448b03ae3cSTejun Heo 
20451da177e4SLinus Torvalds 	keventd_wq = create_workqueue("events");
20461da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
20471da177e4SLinus Torvalds }
2048