/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
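/*
 * Illustrative sketch (not kernel code): work->data packs the cwq
 * pointer and the flag bits into one word, relying on cwq alignment
 * of at least (1 << WORK_STRUCT_FLAG_BITS):
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *	struct cpu_workqueue_struct *cwq =
 *		(void *)(data & WORK_STRUCT_WQ_DATA_MASK);	 // pointer part
 *	int pending = data & (1UL << WORK_STRUCT_PENDING);	 // flag part
 *
 * This is why set_wq_data() may only be called with the pending bit
 * set: whoever owns WORK_STRUCT_PENDING owns the whole word.
 */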
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
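/*
 * Usage sketch (hypothetical driver code, names illustrative only):
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		// runs in process context in a workqueue thread
 *	}
 *
 *	static DECLARE_WORK(my_work, my_handler);
 *	...
 *	queue_work(my_wq, &my_work);	// my_wq from create_workqueue()
 *
 * A second queue_work() before my_handler() has run returns 0 and the
 * work still executes only once; the pending bit makes this cheap.
 */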
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
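/*
 * Usage sketch (hypothetical, names are illustrative only):
 *
 *	static void my_poll(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_poll);
 *	...
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	// run ~1s from now
 *
 * delay == 0 degenerates to plain queue_work(); otherwise the timer
 * fires delayed_work_timer_fn(), which does the real __queue_work().
 */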
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
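/*
 * Shutdown sketch (hypothetical driver, illustrative names):
 *
 *	static void my_remove(struct my_dev *dev)
 *	{
 *		dev->dying = 1;			// stop requeueing first
 *		flush_workqueue(dev->wq);	// then drain what's queued
 *	}
 *
 * The flush completes once the wq_barrier queued behind the existing
 * items has run; work queued after flush_workqueue() returns is not
 * waited for, which is what keeps the flush livelock-free.
 */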
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
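/*
 * Sketch of the difference from flush_workqueue() (hypothetical names):
 *
 *	cancel_delayed_work(&dev->dwork);	// stop the requeue source
 *	flush_work(&dev->dwork.work);		// wait for this item only
 *
 * flush_work() inserts the barrier directly behind @work (or behind
 * the currently running item), so unrelated work on the same CPU's
 * queue is not waited for.
 */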
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
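/*
 * Teardown sketch for a self-rearming delayed work (hypothetical names):
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		...
 *		schedule_delayed_work(&my_dwork, HZ);	// rearms itself
 *	}
 *	...
 *	cancel_delayed_work_sync(&my_dwork);	// kills timer, queue entry
 *						// and any running instance
 *
 * The del_timer()/try_to_grab_pending()/wait_on_work() loop in
 * __cancel_work_timer() is what makes this safe against rearming.
 */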
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
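/*
 * Quick reference (sketch): the schedule_*() calls above are just the
 * queue_*() calls bound to the shared keventd_wq ("events/N" threads):
 *
 *	schedule_work(w)		== queue_work(keventd_wq, w)
 *	schedule_work_on(c, w)		== queue_work_on(c, keventd_wq, w)
 *	schedule_delayed_work(d, t)	== queue_delayed_work(keventd_wq, d, t)
 *
 * Long-running work should use its own queue from create_workqueue()
 * rather than stalling everyone else's events.
 */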
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
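/*
 * Usage sketch (hypothetical): run a per-cpu drain on every online CPU
 * and wait until all of them have finished before proceeding:
 *
 *	static void my_drain(struct work_struct *dummy)
 *	{
 *		// runs once on each online CPU, in keventd context
 *	}
 *	...
 *	if (schedule_on_each_cpu(my_drain))
 *		return -ENOMEM;
 *
 * "Very slow" above is literal: this queues, schedules and flushes one
 * work item per online CPU, so keep it out of fast paths.
 */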
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
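/*
 * Usage sketch (hypothetical): a release path that may be entered from
 * either process or interrupt context:
 *
 *	struct my_obj {
 *		...
 *		struct execute_work ew;
 *	};
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *	...
 *	execute_in_process_context(my_release, &obj->ew);
 *
 * Note the storage rule: @ew must stay valid until the work runs, so
 * it normally lives inside the object being released, as here.
 */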
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
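/*
 * Creation sketch: callers normally use the wrappers from
 * <linux/workqueue.h>, which expand to __create_workqueue_key():
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_workqueue("my_wq");			// one thread per CPU
 *	// or: create_singlethread_workqueue("my_wq")	// one thread total
 *	// or: create_freezeable_workqueue("my_wq")	// frozen on suspend
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 *
 * "my_wq" is an illustrative name; the string becomes the kthread name
 * ("my_wq/0", "my_wq/1", ... or plain "my_wq" if singlethreaded).
 */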
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;
	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpu_clear(cpu, cpu_populated_map);
	}

	return ret;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}