/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"

enum {
	/* global_cwq flags */
	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from local cpu.  Either disabling preemption
 *    on local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct global_cwq;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on idle list or on busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};

/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};
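
/*
 * Illustrative sketch (not kernel code): how a work's color round-trips
 * through work->data using the helpers defined further down in this
 * file.  nr_in_flight[] above counts works per color, which is what
 * workqueue flushing waits on.
 *
 *	int color = cwq->work_color;		// current queueing color
 *	unsigned int flags = work_color_to_flags(color);
 *						// color << WORK_STRUCT_COLOR_SHIFT,
 *						// or'd into work->data at queue time
 *	BUG_ON(get_work_color(work) != color);	// decoded again when processed
 *	// work_next_color(WORK_NR_COLORS - 1) == 0, i.e. colors simply wrap
 */
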
/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * All cpumasks are assumed to be always set on UP and thus can't be
 * used to determine whether there's something to be done.
 */
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)	cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)	for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)	zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)		free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)	clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)	if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)	true
#define free_mayday_mask(mask)		do { } while (0)
#endif

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* W: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* W: drain in progress */
	int			saved_max_active; /* W: saved cwq max_active */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[];		/* I: workqueue name */
};

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
struct workqueue_struct *system_freezable_wq __read_mostly;
struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
EXPORT_SYMBOL_GPL(system_freezable_wq);
EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)

static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}

/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to
 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
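
/*
 * Illustrative usage sketch (not kernel code) for the iterators above:
 * visiting every cwq of a workqueue.  A bound wq yields every possible
 * CPU, an unbound wq yields only WORK_CPU_UNBOUND.  @wq here is a
 * hypothetical workqueue pointer; get_cwq() is defined later in this
 * file.
 *
 *	unsigned int cpu;
 *
 *	for_each_cwq_cpu(cpu, wq) {
 *		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 *
 *		// inspect or update cwq here, e.g. cwq->max_active
 *	}
 */
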
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */

static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

static atomic_t *get_gcwq_nr_running(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(gcwq_nr_running, cpu);
	else
		return &unbound_gcwq_nr_running;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids))
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 * cleared and the work data contains the cpu number it was last on.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}
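
/*
 * Illustrative sketch (not kernel code) of the work->data encoding
 * implemented by the helpers above.  While a work is queued, the low
 * WORK_STRUCT_FLAG_BITS carry flags and the remaining bits hold the cwq
 * pointer (hence the cwq alignment requirement); once execution starts,
 * the same upper bits carry the CPU number it last ran on.
 *
 *	queued:		data = (unsigned long)cwq | WORK_STRUCT_CWQ | flags;
 *			cwq  = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	executing:	data = (cpu << WORK_STRUCT_FLAG_BITS) | WORK_STRUCT_PENDING;
 *			cpu  = data >> WORK_STRUCT_FLAG_BITS;
 */
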
/*
 * Policy functions.  These define the policies on how the global
 * worker pool is managed.  Unless noted otherwise, these functions
 * assume that they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct global_cwq *gcwq)
{
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
		gcwq->flags & GCWQ_HIGHPRI_PENDING;
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 */
static bool need_more_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct global_cwq *gcwq)
{
	return gcwq->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct global_cwq *gcwq)
{
	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

	return !list_empty(&gcwq->worklist) &&
		(atomic_read(nr_running) <= 1 ||
		 gcwq->flags & GCWQ_HIGHPRI_PENDING);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct global_cwq *gcwq)
{
	return need_more_worker(gcwq) && !may_start_working(gcwq);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct global_cwq *gcwq)
{
	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct global_cwq *gcwq)
{
	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
	int nr_busy = gcwq->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @gcwq: gcwq to wake worker for
 *
 * Wake up the first idle worker of @gcwq.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING))
		atomic_inc(get_gcwq_nr_running(cpu));
}
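
/*
 * Worked example (illustrative) for too_many_workers() above: with
 * MAX_IDLE_WORKERS_RATIO == 4, a gcwq with 10 busy workers tolerates up
 * to 4 idle ones, since 5 idle would give (5 - 2) * 4 = 12 >= 10 while
 * 4 idle gives (4 - 2) * 4 = 8 < 10.  The first two idle workers are
 * always kept regardless of the ratio.
 */
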
/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  Worker on the same cpu can be woken up by
 * returning pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct global_cwq *gcwq = get_gcwq(cpu);
	atomic_t *nr_running = get_gcwq_nr_running(cpu);

	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that trustee is not in
	 * charge and we're running on the local cpu w/ rq lock held
	 * and preemption disabled, which in turn means that no one else
	 * could be manipulating idle_list, so dereferencing idle_list
	 * without gcwq lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
		to_wakeup = first_worker(gcwq);
	return to_wakeup ? to_wakeup->task : NULL;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct global_cwq *gcwq = worker->gcwq;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

		if (wakeup) {
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&gcwq->worklist))
				wake_up_worker(gcwq);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct global_cwq *gcwq = worker->gcwq;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
}

/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}

/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}

/**
 * gcwq_determine_ins_pos - find insertion position
 * @gcwq: gcwq of interest
 * @cwq: cwq a work is being queued for
 *
 * A work for @cwq is about to be queued on @gcwq, determine insertion
 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 * queued at the head of the queue but in FIFO order with respect to
 * other HIGHPRI works; otherwise, at the end of the queue.  This
 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 * there are HIGHPRI works pending.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to insertion position.
 */
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
					       struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &gcwq->worklist;

	list_for_each_entry(twork, &gcwq->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
	return &twork->entry;
}
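
/*
 * Worked example (illustrative) for gcwq_determine_ins_pos(): if the
 * gcwq worklist currently holds [H1, H2, N1, N2], where H* came from a
 * WQ_HIGHPRI workqueue and N* from normal ones, a new highpri work H3
 * is inserted before N1 giving [H1, H2, H3, N1, N2] - highpri works
 * jump ahead of normal works but stay FIFO among themselves - while a
 * new normal work N3 is simply appended at the tail.
 */
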
/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct global_cwq *gcwq = cwq->gcwq;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either worker_sched_deactivated() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}

/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.  This is rather expensive and should only be used from
 * cold paths.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	unsigned long flags;
	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;
		struct hlist_node *pos;
		int i;

		spin_lock_irqsave(&gcwq->lock, flags);
		for_each_busy_worker(worker, i, pos, gcwq) {
			if (worker->task != current)
				continue;
			spin_unlock_irqrestore(&gcwq->lock, flags);
			/*
			 * I'm @worker, no locking necessary.  See if @work
			 * is headed to the same workqueue.
			 */
			return worker->current_cwq->wq == wq;
		}
		spin_unlock_irqrestore(&gcwq->lock, flags);
	}
	return false;
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned long flags;

	debug_work_activate(work);

	/* if dying, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (unlikely(cpu == WORK_CPU_UNBOUND))
			cpu = raw_smp_processor_id();

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock_irqsave(&gcwq->lock, flags);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);
	trace_workqueue_queue_work(cpu, cwq, work);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;
	work_flags = work_color_to_flags(cwq->work_color);

	if (likely(cwq->nr_active < cwq->max_active)) {
		trace_workqueue_activate_work(work);
		cwq->nr_active++;
		worklist = gcwq_determine_ins_pos(gcwq, cwq);
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &cwq->delayed_works;
	}

	insert_work(cwq, work, worklist, work_flags);

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
		       struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
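
/*
 * Illustrative caller-side sketch (not part of this file): how the
 * queueing entry points above are typically used from a driver.  The
 * handler and work item names here are hypothetical.
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		pr_info("running in process context on a gcwq worker\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_handler);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 *	queue_work(system_wq, &my_work);	// or schedule_work(&my_work)
 *	queue_delayed_work(system_wq, &my_dwork, HZ);	// roughly 1s later
 */
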
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			  struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		unsigned int lcpu;

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		if (!(wq->flags & WQ_UNBOUND)) {
			struct global_cwq *gcwq = get_work_gcwq(work);

			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
				lcpu = gcwq->cpu;
			else
				lcpu = raw_smp_processor_id();
		} else
			lcpu = WORK_CPU_UNBOUND;

		set_work_cwq(work, get_cwq(lcpu, wq), 0);

		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (likely(!(worker->flags & WORKER_ROGUE))) {
		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
			mod_timer(&gcwq->idle_timer,
				  jiffies + IDLE_WORKER_TIMEOUT);
	} else
		wake_up_all(&gcwq->trustee_wait);

	/* sanity check nr_running */
	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker_clr_flags(worker, WORKER_IDLE);
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
 * @worker: self
 *
 * Works which are scheduled while the cpu is online must at least be
 * scheduled to a worker which is bound to the cpu so that if they are
 * flushed from cpu callbacks while cpu is going down, they are
 * guaranteed to execute on the cpu.
 *
 * This function is to be used by rogue workers and rescuers to bind
 * themselves to the target cpu and may race with cpu going down or
 * coming online.  kthread_bind() can't be used because it may put the
 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
 * verbatim as it's best effort and blocking and gcwq may be
 * [dis]associated in the meantime.
 *
 * This function tries set_cpus_allowed() and locks gcwq and verifies
 * the binding against GCWQ_DISASSOCIATED which is set during
 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
 * idle state or fetches works without dropping lock, it can guarantee
 * the scheduling requirement described in the first paragraph.
 *
 * CONTEXT:
 * Might sleep.  Called without any lock but returns with gcwq->lock
 * held.
 *
 * RETURNS:
 * %true if the associated gcwq is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = worker->gcwq;
	struct task_struct *task = worker->task;

	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation.  Verify
		 * against GCWQ_DISASSOCIATED.
		 */
		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);
		if (gcwq->flags & GCWQ_DISASSOCIATED)
			return false;
		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))
			return true;
		spin_unlock_irq(&gcwq->lock);

		/*
		 * We've raced with CPU hot[un]plug.  Give it a breather
		 * and retry migration.  cond_resched() is required here;
		 * otherwise, we might deadlock against cpu_stop trying to
		 * bring down the CPU on non-preemptive kernel.
		 */
		cpu_relax();
		cond_resched();
	}
}

/*
 * Function for worker->rebind_work used to rebind rogue busy workers
 * to the associated cpu which is coming back online.  This is
 * scheduled by cpu up but can race with other cpu hotplug operations
 * and may be executed twice without intervening cpu down.
 */
static void worker_rebind_fn(struct work_struct *work)
{
	struct worker *worker = container_of(work, struct worker, rebind_work);
	struct global_cwq *gcwq = worker->gcwq;

	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_REBIND);

	spin_unlock_irq(&gcwq->lock);
}

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @gcwq: gcwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @gcwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
{
	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
	struct worker *worker = NULL;
	int id = -1;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->id = id;

	if (!on_unbound_cpu)
		worker->task = kthread_create_on_node(worker_thread,
						      worker,
						      cpu_to_node(gcwq->cpu),
						      "kworker/%u:%d", gcwq->cpu, id);
	else
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/u:%d", id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if CPU comes
	 * online later on.  Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind && !on_unbound_cpu)
		kthread_bind(worker->task, gcwq->cpu);
	else {
		worker->task->flags |= PF_THREAD_BOUND;
		if (on_unbound_cpu)
			worker->flags |= WORKER_UNBOUND;
	}

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}
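
/*
 * Illustrative pairing of the two helpers above (the same pattern
 * maybe_create_worker() uses below): create_worker() sleeps and
 * allocates, so it is called without gcwq->lock, while start_worker()
 * only updates gcwq state and therefore runs under the lock.
 *
 *	worker = create_worker(gcwq, true);
 *	if (worker) {
 *		spin_lock_irq(&gcwq->lock);
 *		start_worker(worker);
 *		spin_unlock_irq(&gcwq->lock);
 *	}
 */
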
/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
}

static void idle_worker_timeout(unsigned long __gcwq)
{
	struct global_cwq *gcwq = (void *)__gcwq;

	spin_lock_irq(&gcwq->lock);

	if (too_many_workers(gcwq)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires))
			mod_timer(&gcwq->idle_timer, expires);
		else {
			/* it's been idle for too long, wake up manager */
			gcwq->flags |= GCWQ_MANAGE_WORKERS;
			wake_up_worker(gcwq);
		}
	}

	spin_unlock_irq(&gcwq->lock);
}

static bool send_mayday(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct workqueue_struct *wq = cwq->wq;
	unsigned int cpu;

	if (!(wq->flags & WQ_RESCUER))
		return false;

	/* mayday mayday mayday */
	cpu = cwq->gcwq->cpu;
	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
	if (cpu == WORK_CPU_UNBOUND)
		cpu = 0;
	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
		wake_up_process(wq->rescuer->task);
	return true;
}

static void gcwq_mayday_timeout(unsigned long __gcwq)
{
	struct global_cwq *gcwq = (void *)__gcwq;
	struct work_struct *work;

	spin_lock_irq(&gcwq->lock);

	if (need_to_create_worker(gcwq)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful.  We might be hitting an
		 * allocation deadlock.  Send distress signals to
		 * rescuers.
		 */
		list_for_each_entry(work, &gcwq->worklist, entry)
			send_mayday(work);
	}

	spin_unlock_irq(&gcwq->lock);

	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
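
/*
 * Worked timing example (illustrative) for the mayday machinery above,
 * using the constants from the top of this file: with HZ == 1000,
 * MAYDAY_INITIAL_TIMEOUT is HZ / 100 = 10 ticks (~10ms, never less than
 * two ticks) before the first SOS is sent, and gcwq_mayday_timeout()
 * then rearms itself every MAYDAY_INTERVAL = HZ / 10 = 100 ticks
 * (~100ms) for as long as worker creation keeps failing.
 */
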
/**
 * maybe_create_worker - create a new worker if necessary
 * @gcwq: gcwq to create a new worker for
 *
 * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
 * have at least one idle worker on return from this function.  If
 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
 * sent to all rescuers with works scheduled on @gcwq to resolve
 * possible allocation deadlock.
 *
 * On return, need_to_create_worker() is guaranteed to be false and
 * may_start_working() true.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.  Called only from
 * manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_create_worker(struct global_cwq *gcwq)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	if (!need_to_create_worker(gcwq))
		return false;
restart:
	spin_unlock_irq(&gcwq->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		struct worker *worker;

		worker = create_worker(gcwq, true);
		if (worker) {
			del_timer_sync(&gcwq->mayday_timer);
			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			BUG_ON(need_to_create_worker(gcwq));
			return true;
		}

		if (!need_to_create_worker(gcwq))
			break;

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(CREATE_COOLDOWN);

		if (!need_to_create_worker(gcwq))
			break;
	}

	del_timer_sync(&gcwq->mayday_timer);
	spin_lock_irq(&gcwq->lock);
	if (need_to_create_worker(gcwq))
		goto restart;
	return true;
}

/**
 * maybe_destroy_workers - destroy workers which have been idle for a while
 * @gcwq: gcwq to destroy workers for
 *
 * Destroy @gcwq workers which have been idle for longer than
 * IDLE_WORKER_TIMEOUT.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Called only from manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_destroy_workers(struct global_cwq *gcwq)
{
	bool ret = false;

	while (too_many_workers(gcwq)) {
		struct worker *worker;
		unsigned long expires;

		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&gcwq->idle_timer, expires);
			break;
		}

		destroy_worker(worker);
		ret = true;
	}

	return ret;
}

/**
 * manage_workers - manage worker pool
 * @worker: self
 *
 * Assume the manager role and manage gcwq worker pool @worker belongs
 * to.  At any given time, there can be only zero or one manager per
 * gcwq.  The exclusion is handled automatically by this function.
 *
 * The caller can safely start processing works on false return.  On
 * true return, it's guaranteed that need_to_create_worker() is false
 * and may_start_working() is true.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true if
 * some action was taken.
 */
static bool manage_workers(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	bool ret = false;

	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
		return ret;

	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
	gcwq->flags |= GCWQ_MANAGING_WORKERS;

	/*
	 * Destroy and then create so that may_start_working() is true
	 * on return.
	 */
	ret |= maybe_destroy_workers(gcwq);
	ret |= maybe_create_worker(gcwq);

	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;

	/*
	 * The trustee might be waiting to take over the manager
	 * position, tell it we're done.
	 */
	if (unlikely(gcwq->trustee))
		wake_up_all(&gcwq->trustee_wait);

	return ret;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);
	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);

	trace_workqueue_activate_work(work);
	move_linked_works(work, pos, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 * @delayed: for a delayed work
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
				 bool delayed)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;

	if (!delayed) {
		cwq->nr_active--;
		if (!list_empty(&cwq->delayed_works)) {
			/* one down, submit a delayed one */
			if (cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
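
/*
 * Worked example (illustrative) tying the above to __queue_work(): on a
 * cwq with max_active == 1, the first queued work becomes active and a
 * second one goes to cwq->delayed_works with WORK_STRUCT_DELAYED set.
 * When the first work finishes, cwq_dec_nr_in_flight() drops nr_active
 * below max_active and cwq_activate_first_delayed() moves the delayed
 * work onto the gcwq worklist, so per-cwq concurrency never exceeds
 * max_active.
 */
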
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
	work_func_t f = work->func;
	int work_color;
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/*
	 * A single work shouldn't be executed concurrently by
	 * multiple workers on a single cpu.  Check whether anyone is
	 * already processing the work.  If so, defer the work to the
	 * currently executing one.
	 */
	collision = __find_worker_executing_work(gcwq, bwh, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, NULL);
		return;
	}

	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);

	/* record the current cpu number in the work data and dequeue */
	set_work_cpu(work, gcwq->cpu);
	list_del_init(&work->entry);

	/*
	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
	 */
	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
						struct work_struct, entry);

		if (!list_empty(&gcwq->worklist) &&
		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
			wake_up_worker(gcwq);
		else
			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
	}

	/*
	 * CPU intensive works don't participate in concurrency
	 * management.  They're the scheduler's responsibility.
	 */
	if (unlikely(cpu_intensive))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

	spin_unlock_irq(&gcwq->lock);

	work_clear_pending(work);
	lock_map_acquire_read(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	trace_workqueue_execute_start(work);
	f(work);
	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
1870 */ 1871 trace_workqueue_execute_end(work); 1872 lock_map_release(&lockdep_map); 1873 lock_map_release(&cwq->wq->lockdep_map); 1874 1875 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 1876 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " 1877 "%s/0x%08x/%d\n", 1878 current->comm, preempt_count(), task_pid_nr(current)); 1879 printk(KERN_ERR " last function: "); 1880 print_symbol("%s\n", (unsigned long)f); 1881 debug_show_held_locks(current); 1882 dump_stack(); 1883 } 1884 1885 spin_lock_irq(&gcwq->lock); 1886 1887 /* clear cpu intensive status */ 1888 if (unlikely(cpu_intensive)) 1889 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 1890 1891 /* we're done with it, release */ 1892 hlist_del_init(&worker->hentry); 1893 worker->current_work = NULL; 1894 worker->current_cwq = NULL; 1895 cwq_dec_nr_in_flight(cwq, work_color, false); 1896 } 1897 1898 /** 1899 * process_scheduled_works - process scheduled works 1900 * @worker: self 1901 * 1902 * Process all scheduled works. Please note that the scheduled list 1903 * may change while processing a work, so this function repeatedly 1904 * fetches a work from the top and executes it. 1905 * 1906 * CONTEXT: 1907 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 1908 * multiple times. 1909 */ 1910 static void process_scheduled_works(struct worker *worker) 1911 { 1912 while (!list_empty(&worker->scheduled)) { 1913 struct work_struct *work = list_first_entry(&worker->scheduled, 1914 struct work_struct, entry); 1915 process_one_work(worker, work); 1916 } 1917 } 1918 1919 /** 1920 * worker_thread - the worker thread function 1921 * @__worker: self 1922 * 1923 * The gcwq worker thread function. There's a single dynamic pool of 1924 * these per each cpu. These workers process all works regardless of 1925 * their specific target workqueue. The only exception is works which 1926 * belong to workqueues with a rescuer which will be explained in 1927 * rescuer_thread(). 1928 */ 1929 static int worker_thread(void *__worker) 1930 { 1931 struct worker *worker = __worker; 1932 struct global_cwq *gcwq = worker->gcwq; 1933 1934 /* tell the scheduler that this is a workqueue worker */ 1935 worker->task->flags |= PF_WQ_WORKER; 1936 woke_up: 1937 spin_lock_irq(&gcwq->lock); 1938 1939 /* DIE can be set only while we're idle, checking here is enough */ 1940 if (worker->flags & WORKER_DIE) { 1941 spin_unlock_irq(&gcwq->lock); 1942 worker->task->flags &= ~PF_WQ_WORKER; 1943 return 0; 1944 } 1945 1946 worker_leave_idle(worker); 1947 recheck: 1948 /* no more worker necessary? */ 1949 if (!need_more_worker(gcwq)) 1950 goto sleep; 1951 1952 /* do we need to manage? */ 1953 if (unlikely(!may_start_working(gcwq)) && manage_workers(worker)) 1954 goto recheck; 1955 1956 /* 1957 * ->scheduled list can only be filled while a worker is 1958 * preparing to process a work or actually processing it. 1959 * Make sure nobody diddled with it while I was sleeping. 1960 */ 1961 BUG_ON(!list_empty(&worker->scheduled)); 1962 1963 /* 1964 * When control reaches this point, we're guaranteed to have 1965 * at least one idle worker or that someone else has already 1966 * assumed the manager role. 
1967 */ 1968 worker_clr_flags(worker, WORKER_PREP); 1969 1970 do { 1971 struct work_struct *work = 1972 list_first_entry(&gcwq->worklist, 1973 struct work_struct, entry); 1974 1975 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { 1976 /* optimization path, not strictly necessary */ 1977 process_one_work(worker, work); 1978 if (unlikely(!list_empty(&worker->scheduled))) 1979 process_scheduled_works(worker); 1980 } else { 1981 move_linked_works(work, &worker->scheduled, NULL); 1982 process_scheduled_works(worker); 1983 } 1984 } while (keep_working(gcwq)); 1985 1986 worker_set_flags(worker, WORKER_PREP, false); 1987 sleep: 1988 if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker)) 1989 goto recheck; 1990 1991 /* 1992 * gcwq->lock is held and there's no work to process and no 1993 * need to manage, sleep. Workers are woken up only while 1994 * holding gcwq->lock or from local cpu, so setting the 1995 * current state before releasing gcwq->lock is enough to 1996 * prevent losing any event. 1997 */ 1998 worker_enter_idle(worker); 1999 __set_current_state(TASK_INTERRUPTIBLE); 2000 spin_unlock_irq(&gcwq->lock); 2001 schedule(); 2002 goto woke_up; 2003 } 2004 2005 /** 2006 * rescuer_thread - the rescuer thread function 2007 * @__wq: the associated workqueue 2008 * 2009 * Workqueue rescuer thread function. There's one rescuer for each 2010 * workqueue which has WQ_RESCUER set. 2011 * 2012 * Regular work processing on a gcwq may block trying to create a new 2013 * worker which uses GFP_KERNEL allocation which has slight chance of 2014 * developing into deadlock if some works currently on the same queue 2015 * need to be processed to satisfy the GFP_KERNEL allocation. This is 2016 * the problem rescuer solves. 2017 * 2018 * When such condition is possible, the gcwq summons rescuers of all 2019 * workqueues which have works queued on the gcwq and let them process 2020 * those works so that forward progress can be guaranteed. 2021 * 2022 * This should happen rarely. 2023 */ 2024 static int rescuer_thread(void *__wq) 2025 { 2026 struct workqueue_struct *wq = __wq; 2027 struct worker *rescuer = wq->rescuer; 2028 struct list_head *scheduled = &rescuer->scheduled; 2029 bool is_unbound = wq->flags & WQ_UNBOUND; 2030 unsigned int cpu; 2031 2032 set_user_nice(current, RESCUER_NICE_LEVEL); 2033 repeat: 2034 set_current_state(TASK_INTERRUPTIBLE); 2035 2036 if (kthread_should_stop()) 2037 return 0; 2038 2039 /* 2040 * See whether any cpu is asking for help. Unbounded 2041 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. 2042 */ 2043 for_each_mayday_cpu(cpu, wq->mayday_mask) { 2044 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; 2045 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); 2046 struct global_cwq *gcwq = cwq->gcwq; 2047 struct work_struct *work, *n; 2048 2049 __set_current_state(TASK_RUNNING); 2050 mayday_clear_cpu(cpu, wq->mayday_mask); 2051 2052 /* migrate to the target cpu if possible */ 2053 rescuer->gcwq = gcwq; 2054 worker_maybe_bind_and_lock(rescuer); 2055 2056 /* 2057 * Slurp in all works issued via this workqueue and 2058 * process'em. 2059 */ 2060 BUG_ON(!list_empty(&rescuer->scheduled)); 2061 list_for_each_entry_safe(work, n, &gcwq->worklist, entry) 2062 if (get_work_cwq(work) == cwq) 2063 move_linked_works(work, scheduled, &n); 2064 2065 process_scheduled_works(rescuer); 2066 2067 /* 2068 * Leave this gcwq. If keep_working() is %true, notify a 2069 * regular worker; otherwise, we end up with 0 concurrency 2070 * and stalling the execution. 
2071 */ 2072 if (keep_working(gcwq)) 2073 wake_up_worker(gcwq); 2074 2075 spin_unlock_irq(&gcwq->lock); 2076 } 2077 2078 schedule(); 2079 goto repeat; 2080 } 2081 2082 struct wq_barrier { 2083 struct work_struct work; 2084 struct completion done; 2085 }; 2086 2087 static void wq_barrier_func(struct work_struct *work) 2088 { 2089 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2090 complete(&barr->done); 2091 } 2092 2093 /** 2094 * insert_wq_barrier - insert a barrier work 2095 * @cwq: cwq to insert barrier into 2096 * @barr: wq_barrier to insert 2097 * @target: target work to attach @barr to 2098 * @worker: worker currently executing @target, NULL if @target is not executing 2099 * 2100 * @barr is linked to @target such that @barr is completed only after 2101 * @target finishes execution. Please note that the ordering 2102 * guarantee is observed only with respect to @target and on the local 2103 * cpu. 2104 * 2105 * Currently, a queued barrier can't be canceled. This is because 2106 * try_to_grab_pending() can't determine whether the work to be 2107 * grabbed is at the head of the queue and thus can't clear LINKED 2108 * flag of the previous work while there must be a valid next work 2109 * after a work with LINKED flag set. 2110 * 2111 * Note that when @worker is non-NULL, @target may be modified 2112 * underneath us, so we can't reliably determine cwq from @target. 2113 * 2114 * CONTEXT: 2115 * spin_lock_irq(gcwq->lock). 2116 */ 2117 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 2118 struct wq_barrier *barr, 2119 struct work_struct *target, struct worker *worker) 2120 { 2121 struct list_head *head; 2122 unsigned int linked = 0; 2123 2124 /* 2125 * debugobject calls are safe here even with gcwq->lock locked 2126 * as we know for sure that this will not trigger any of the 2127 * checks and call back into the fixup functions where we 2128 * might deadlock. 2129 */ 2130 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 2131 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 2132 init_completion(&barr->done); 2133 2134 /* 2135 * If @target is currently being executed, schedule the 2136 * barrier to the worker; otherwise, put it after @target. 2137 */ 2138 if (worker) 2139 head = worker->scheduled.next; 2140 else { 2141 unsigned long *bits = work_data_bits(target); 2142 2143 head = target->entry.next; 2144 /* there can already be other linked works, inherit and set */ 2145 linked = *bits & WORK_STRUCT_LINKED; 2146 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 2147 } 2148 2149 debug_work_activate(&barr->work); 2150 insert_work(cwq, &barr->work, head, 2151 work_color_to_flags(WORK_NO_COLOR) | linked); 2152 } 2153 2154 /** 2155 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing 2156 * @wq: workqueue being flushed 2157 * @flush_color: new flush color, < 0 for no-op 2158 * @work_color: new work color, < 0 for no-op 2159 * 2160 * Prepare cwqs for workqueue flushing. 2161 * 2162 * If @flush_color is non-negative, flush_color on all cwqs should be 2163 * -1. If no cwq has in-flight commands at the specified color, all 2164 * cwq->flush_color's stay at -1 and %false is returned. If any cwq 2165 * has in flight commands, its cwq->flush_color is set to 2166 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq 2167 * wakeup logic is armed and %true is returned. 2168 * 2169 * The caller should have initialized @wq->first_flusher prior to 2170 * calling this function with non-negative @flush_color. 
If 2171 * @flush_color is negative, no flush color update is done and %false 2172 * is returned. 2173 * 2174 * If @work_color is non-negative, all cwqs should have the same 2175 * work_color which is previous to @work_color and all will be 2176 * advanced to @work_color. 2177 * 2178 * CONTEXT: 2179 * mutex_lock(wq->flush_mutex). 2180 * 2181 * RETURNS: 2182 * %true if @flush_color >= 0 and there's something to flush. %false 2183 * otherwise. 2184 */ 2185 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, 2186 int flush_color, int work_color) 2187 { 2188 bool wait = false; 2189 unsigned int cpu; 2190 2191 if (flush_color >= 0) { 2192 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); 2193 atomic_set(&wq->nr_cwqs_to_flush, 1); 2194 } 2195 2196 for_each_cwq_cpu(cpu, wq) { 2197 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2198 struct global_cwq *gcwq = cwq->gcwq; 2199 2200 spin_lock_irq(&gcwq->lock); 2201 2202 if (flush_color >= 0) { 2203 BUG_ON(cwq->flush_color != -1); 2204 2205 if (cwq->nr_in_flight[flush_color]) { 2206 cwq->flush_color = flush_color; 2207 atomic_inc(&wq->nr_cwqs_to_flush); 2208 wait = true; 2209 } 2210 } 2211 2212 if (work_color >= 0) { 2213 BUG_ON(work_color != work_next_color(cwq->work_color)); 2214 cwq->work_color = work_color; 2215 } 2216 2217 spin_unlock_irq(&gcwq->lock); 2218 } 2219 2220 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) 2221 complete(&wq->first_flusher->done); 2222 2223 return wait; 2224 } 2225 2226 /** 2227 * flush_workqueue - ensure that any scheduled work has run to completion. 2228 * @wq: workqueue to flush 2229 * 2230 * Forces execution of the workqueue and blocks until its completion. 2231 * This is typically used in driver shutdown handlers. 2232 * 2233 * We sleep until all works which were queued on entry have been handled, 2234 * but we are not livelocked by new incoming ones. 2235 */ 2236 void flush_workqueue(struct workqueue_struct *wq) 2237 { 2238 struct wq_flusher this_flusher = { 2239 .list = LIST_HEAD_INIT(this_flusher.list), 2240 .flush_color = -1, 2241 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), 2242 }; 2243 int next_color; 2244 2245 lock_map_acquire(&wq->lockdep_map); 2246 lock_map_release(&wq->lockdep_map); 2247 2248 mutex_lock(&wq->flush_mutex); 2249 2250 /* 2251 * Start-to-wait phase 2252 */ 2253 next_color = work_next_color(wq->work_color); 2254 2255 if (next_color != wq->flush_color) { 2256 /* 2257 * Color space is not full. The current work_color 2258 * becomes our flush_color and work_color is advanced 2259 * by one. 2260 */ 2261 BUG_ON(!list_empty(&wq->flusher_overflow)); 2262 this_flusher.flush_color = wq->work_color; 2263 wq->work_color = next_color; 2264 2265 if (!wq->first_flusher) { 2266 /* no flush in progress, become the first flusher */ 2267 BUG_ON(wq->flush_color != this_flusher.flush_color); 2268 2269 wq->first_flusher = &this_flusher; 2270 2271 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, 2272 wq->work_color)) { 2273 /* nothing to flush, done */ 2274 wq->flush_color = next_color; 2275 wq->first_flusher = NULL; 2276 goto out_unlock; 2277 } 2278 } else { 2279 /* wait in queue */ 2280 BUG_ON(wq->flush_color == this_flusher.flush_color); 2281 list_add_tail(&this_flusher.list, &wq->flusher_queue); 2282 flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2283 } 2284 } else { 2285 /* 2286 * Oops, color space is full, wait on overflow queue. 2287 * The next flush completion will assign us 2288 * flush_color and transfer to flusher_queue. 
2289 */ 2290 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 2291 } 2292 2293 mutex_unlock(&wq->flush_mutex); 2294 2295 wait_for_completion(&this_flusher.done); 2296 2297 /* 2298 * Wake-up-and-cascade phase 2299 * 2300 * First flushers are responsible for cascading flushes and 2301 * handling overflow. Non-first flushers can simply return. 2302 */ 2303 if (wq->first_flusher != &this_flusher) 2304 return; 2305 2306 mutex_lock(&wq->flush_mutex); 2307 2308 /* we might have raced, check again with mutex held */ 2309 if (wq->first_flusher != &this_flusher) 2310 goto out_unlock; 2311 2312 wq->first_flusher = NULL; 2313 2314 BUG_ON(!list_empty(&this_flusher.list)); 2315 BUG_ON(wq->flush_color != this_flusher.flush_color); 2316 2317 while (true) { 2318 struct wq_flusher *next, *tmp; 2319 2320 /* complete all the flushers sharing the current flush color */ 2321 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 2322 if (next->flush_color != wq->flush_color) 2323 break; 2324 list_del_init(&next->list); 2325 complete(&next->done); 2326 } 2327 2328 BUG_ON(!list_empty(&wq->flusher_overflow) && 2329 wq->flush_color != work_next_color(wq->work_color)); 2330 2331 /* this flush_color is finished, advance by one */ 2332 wq->flush_color = work_next_color(wq->flush_color); 2333 2334 /* one color has been freed, handle overflow queue */ 2335 if (!list_empty(&wq->flusher_overflow)) { 2336 /* 2337 * Assign the same color to all overflowed 2338 * flushers, advance work_color and append to 2339 * flusher_queue. This is the start-to-wait 2340 * phase for these overflowed flushers. 2341 */ 2342 list_for_each_entry(tmp, &wq->flusher_overflow, list) 2343 tmp->flush_color = wq->work_color; 2344 2345 wq->work_color = work_next_color(wq->work_color); 2346 2347 list_splice_tail_init(&wq->flusher_overflow, 2348 &wq->flusher_queue); 2349 flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2350 } 2351 2352 if (list_empty(&wq->flusher_queue)) { 2353 BUG_ON(wq->flush_color != wq->work_color); 2354 break; 2355 } 2356 2357 /* 2358 * Need to flush more colors. Make the next flusher 2359 * the new first flusher and arm cwqs. 2360 */ 2361 BUG_ON(wq->flush_color == wq->work_color); 2362 BUG_ON(wq->flush_color != next->flush_color); 2363 2364 list_del_init(&next->list); 2365 wq->first_flusher = next; 2366 2367 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) 2368 break; 2369 2370 /* 2371 * Meh... this color is already done, clear first 2372 * flusher and repeat cascading. 2373 */ 2374 wq->first_flusher = NULL; 2375 } 2376 2377 out_unlock: 2378 mutex_unlock(&wq->flush_mutex); 2379 } 2380 EXPORT_SYMBOL_GPL(flush_workqueue); 2381 2382 /** 2383 * drain_workqueue - drain a workqueue 2384 * @wq: workqueue to drain 2385 * 2386 * Wait until the workqueue becomes empty. While draining is in progress, 2387 * only chain queueing is allowed. IOW, only currently pending or running 2388 * work items on @wq can queue further work items on it. @wq is flushed 2389 * repeatedly until it becomes empty. The number of flushing is detemined 2390 * by the depth of chaining and should be relatively short. Whine if it 2391 * takes too long. 2392 */ 2393 void drain_workqueue(struct workqueue_struct *wq) 2394 { 2395 unsigned int flush_cnt = 0; 2396 unsigned int cpu; 2397 2398 /* 2399 * __queue_work() needs to test whether there are drainers, is much 2400 * hotter than drain_workqueue() and already looks at @wq->flags. 2401 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers. 
2402 */ 2403 spin_lock(&workqueue_lock); 2404 if (!wq->nr_drainers++) 2405 wq->flags |= WQ_DRAINING; 2406 spin_unlock(&workqueue_lock); 2407 reflush: 2408 flush_workqueue(wq); 2409 2410 for_each_cwq_cpu(cpu, wq) { 2411 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2412 bool drained; 2413 2414 spin_lock_irq(&cwq->gcwq->lock); 2415 drained = !cwq->nr_active && list_empty(&cwq->delayed_works); 2416 spin_unlock_irq(&cwq->gcwq->lock); 2417 2418 if (drained) 2419 continue; 2420 2421 if (++flush_cnt == 10 || 2422 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 2423 pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n", 2424 wq->name, flush_cnt); 2425 goto reflush; 2426 } 2427 2428 spin_lock(&workqueue_lock); 2429 if (!--wq->nr_drainers) 2430 wq->flags &= ~WQ_DRAINING; 2431 spin_unlock(&workqueue_lock); 2432 } 2433 EXPORT_SYMBOL_GPL(drain_workqueue); 2434 2435 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 2436 bool wait_executing) 2437 { 2438 struct worker *worker = NULL; 2439 struct global_cwq *gcwq; 2440 struct cpu_workqueue_struct *cwq; 2441 2442 might_sleep(); 2443 gcwq = get_work_gcwq(work); 2444 if (!gcwq) 2445 return false; 2446 2447 spin_lock_irq(&gcwq->lock); 2448 if (!list_empty(&work->entry)) { 2449 /* 2450 * See the comment near try_to_grab_pending()->smp_rmb(). 2451 * If it was re-queued to a different gcwq under us, we 2452 * are not going to wait. 2453 */ 2454 smp_rmb(); 2455 cwq = get_work_cwq(work); 2456 if (unlikely(!cwq || gcwq != cwq->gcwq)) 2457 goto already_gone; 2458 } else if (wait_executing) { 2459 worker = find_worker_executing_work(gcwq, work); 2460 if (!worker) 2461 goto already_gone; 2462 cwq = worker->current_cwq; 2463 } else 2464 goto already_gone; 2465 2466 insert_wq_barrier(cwq, barr, work, worker); 2467 spin_unlock_irq(&gcwq->lock); 2468 2469 /* 2470 * If @max_active is 1 or rescuer is in use, flushing another work 2471 * item on the same workqueue may lead to deadlock. Make sure the 2472 * flusher is not running on the same workqueue by verifying write 2473 * access. 2474 */ 2475 if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) 2476 lock_map_acquire(&cwq->wq->lockdep_map); 2477 else 2478 lock_map_acquire_read(&cwq->wq->lockdep_map); 2479 lock_map_release(&cwq->wq->lockdep_map); 2480 2481 return true; 2482 already_gone: 2483 spin_unlock_irq(&gcwq->lock); 2484 return false; 2485 } 2486 2487 /** 2488 * flush_work - wait for a work to finish executing the last queueing instance 2489 * @work: the work to flush 2490 * 2491 * Wait until @work has finished execution. This function considers 2492 * only the last queueing instance of @work. If @work has been 2493 * enqueued across different CPUs on a non-reentrant workqueue or on 2494 * multiple workqueues, @work might still be executing on return on 2495 * some of the CPUs from earlier queueing. 2496 * 2497 * If @work was queued only on a non-reentrant, ordered or unbound 2498 * workqueue, @work is guaranteed to be idle on return if it hasn't 2499 * been requeued since flush started. 2500 * 2501 * RETURNS: 2502 * %true if flush_work() waited for the work to finish execution, 2503 * %false if it was already idle. 
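 *
 * A minimal usage sketch for illustration only (hypothetical caller;
 * my_work_fn and my_work are made-up names, not defined in this file):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);
 *	flush_work(&my_work);
 *
 * On return from flush_work() above, the queueing instance of my_work is
 * no longer executing; %true is returned if the flush had to wait and
 * %false if the work was already idle.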
2504 */ 2505 bool flush_work(struct work_struct *work) 2506 { 2507 struct wq_barrier barr; 2508 2509 if (start_flush_work(work, &barr, true)) { 2510 wait_for_completion(&barr.done); 2511 destroy_work_on_stack(&barr.work); 2512 return true; 2513 } else 2514 return false; 2515 } 2516 EXPORT_SYMBOL_GPL(flush_work); 2517 2518 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) 2519 { 2520 struct wq_barrier barr; 2521 struct worker *worker; 2522 2523 spin_lock_irq(&gcwq->lock); 2524 2525 worker = find_worker_executing_work(gcwq, work); 2526 if (unlikely(worker)) 2527 insert_wq_barrier(worker->current_cwq, &barr, work, worker); 2528 2529 spin_unlock_irq(&gcwq->lock); 2530 2531 if (unlikely(worker)) { 2532 wait_for_completion(&barr.done); 2533 destroy_work_on_stack(&barr.work); 2534 return true; 2535 } else 2536 return false; 2537 } 2538 2539 static bool wait_on_work(struct work_struct *work) 2540 { 2541 bool ret = false; 2542 int cpu; 2543 2544 might_sleep(); 2545 2546 lock_map_acquire(&work->lockdep_map); 2547 lock_map_release(&work->lockdep_map); 2548 2549 for_each_gcwq_cpu(cpu) 2550 ret |= wait_on_cpu_work(get_gcwq(cpu), work); 2551 return ret; 2552 } 2553 2554 /** 2555 * flush_work_sync - wait until a work has finished execution 2556 * @work: the work to flush 2557 * 2558 * Wait until @work has finished execution. On return, it's 2559 * guaranteed that all queueing instances of @work which happened 2560 * before this function is called are finished. In other words, if 2561 * @work hasn't been requeued since this function was called, @work is 2562 * guaranteed to be idle on return. 2563 * 2564 * RETURNS: 2565 * %true if flush_work_sync() waited for the work to finish execution, 2566 * %false if it was already idle. 2567 */ 2568 bool flush_work_sync(struct work_struct *work) 2569 { 2570 struct wq_barrier barr; 2571 bool pending, waited; 2572 2573 /* we'll wait for executions separately, queue barr only if pending */ 2574 pending = start_flush_work(work, &barr, false); 2575 2576 /* wait for executions to finish */ 2577 waited = wait_on_work(work); 2578 2579 /* wait for the pending one */ 2580 if (pending) { 2581 wait_for_completion(&barr.done); 2582 destroy_work_on_stack(&barr.work); 2583 } 2584 2585 return pending || waited; 2586 } 2587 EXPORT_SYMBOL_GPL(flush_work_sync); 2588 2589 /* 2590 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, 2591 * so this work can't be re-armed in any way. 2592 */ 2593 static int try_to_grab_pending(struct work_struct *work) 2594 { 2595 struct global_cwq *gcwq; 2596 int ret = -1; 2597 2598 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 2599 return 0; 2600 2601 /* 2602 * The queueing is in progress, or it is already queued. Try to 2603 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 2604 */ 2605 gcwq = get_work_gcwq(work); 2606 if (!gcwq) 2607 return ret; 2608 2609 spin_lock_irq(&gcwq->lock); 2610 if (!list_empty(&work->entry)) { 2611 /* 2612 * This work is queued, but perhaps we locked the wrong gcwq. 2613 * In that case we must see the new value after rmb(), see 2614 * insert_work()->wmb(). 
2615 */ 2616 smp_rmb(); 2617 if (gcwq == get_work_gcwq(work)) { 2618 debug_work_deactivate(work); 2619 list_del_init(&work->entry); 2620 cwq_dec_nr_in_flight(get_work_cwq(work), 2621 get_work_color(work), 2622 *work_data_bits(work) & WORK_STRUCT_DELAYED); 2623 ret = 1; 2624 } 2625 } 2626 spin_unlock_irq(&gcwq->lock); 2627 2628 return ret; 2629 } 2630 2631 static bool __cancel_work_timer(struct work_struct *work, 2632 struct timer_list* timer) 2633 { 2634 int ret; 2635 2636 do { 2637 ret = (timer && likely(del_timer(timer))); 2638 if (!ret) 2639 ret = try_to_grab_pending(work); 2640 wait_on_work(work); 2641 } while (unlikely(ret < 0)); 2642 2643 clear_work_data(work); 2644 return ret; 2645 } 2646 2647 /** 2648 * cancel_work_sync - cancel a work and wait for it to finish 2649 * @work: the work to cancel 2650 * 2651 * Cancel @work and wait for its execution to finish. This function 2652 * can be used even if the work re-queues itself or migrates to 2653 * another workqueue. On return from this function, @work is 2654 * guaranteed to be not pending or executing on any CPU. 2655 * 2656 * cancel_work_sync(&delayed_work->work) must not be used for 2657 * delayed_work's. Use cancel_delayed_work_sync() instead. 2658 * 2659 * The caller must ensure that the workqueue on which @work was last 2660 * queued can't be destroyed before this function returns. 2661 * 2662 * RETURNS: 2663 * %true if @work was pending, %false otherwise. 2664 */ 2665 bool cancel_work_sync(struct work_struct *work) 2666 { 2667 return __cancel_work_timer(work, NULL); 2668 } 2669 EXPORT_SYMBOL_GPL(cancel_work_sync); 2670 2671 /** 2672 * flush_delayed_work - wait for a dwork to finish executing the last queueing 2673 * @dwork: the delayed work to flush 2674 * 2675 * Delayed timer is cancelled and the pending work is queued for 2676 * immediate execution. Like flush_work(), this function only 2677 * considers the last queueing instance of @dwork. 2678 * 2679 * RETURNS: 2680 * %true if flush_work() waited for the work to finish execution, 2681 * %false if it was already idle. 2682 */ 2683 bool flush_delayed_work(struct delayed_work *dwork) 2684 { 2685 if (del_timer_sync(&dwork->timer)) 2686 __queue_work(raw_smp_processor_id(), 2687 get_work_cwq(&dwork->work)->wq, &dwork->work); 2688 return flush_work(&dwork->work); 2689 } 2690 EXPORT_SYMBOL(flush_delayed_work); 2691 2692 /** 2693 * flush_delayed_work_sync - wait for a dwork to finish 2694 * @dwork: the delayed work to flush 2695 * 2696 * Delayed timer is cancelled and the pending work is queued for 2697 * execution immediately. Other than timer handling, its behavior 2698 * is identical to flush_work_sync(). 2699 * 2700 * RETURNS: 2701 * %true if flush_work_sync() waited for the work to finish execution, 2702 * %false if it was already idle. 2703 */ 2704 bool flush_delayed_work_sync(struct delayed_work *dwork) 2705 { 2706 if (del_timer_sync(&dwork->timer)) 2707 __queue_work(raw_smp_processor_id(), 2708 get_work_cwq(&dwork->work)->wq, &dwork->work); 2709 return flush_work_sync(&dwork->work); 2710 } 2711 EXPORT_SYMBOL(flush_delayed_work_sync); 2712 2713 /** 2714 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 2715 * @dwork: the delayed work cancel 2716 * 2717 * This is cancel_work_sync() for delayed works. 2718 * 2719 * RETURNS: 2720 * %true if @dwork was pending, %false otherwise. 
2721 */ 2722 bool cancel_delayed_work_sync(struct delayed_work *dwork) 2723 { 2724 return __cancel_work_timer(&dwork->work, &dwork->timer); 2725 } 2726 EXPORT_SYMBOL(cancel_delayed_work_sync); 2727 2728 /** 2729 * schedule_work - put work task in global workqueue 2730 * @work: job to be done 2731 * 2732 * Returns zero if @work was already on the kernel-global workqueue and 2733 * non-zero otherwise. 2734 * 2735 * This puts a job in the kernel-global workqueue if it was not already 2736 * queued and leaves it in the same position on the kernel-global 2737 * workqueue otherwise. 2738 */ 2739 int schedule_work(struct work_struct *work) 2740 { 2741 return queue_work(system_wq, work); 2742 } 2743 EXPORT_SYMBOL(schedule_work); 2744 2745 /* 2746 * schedule_work_on - put work task on a specific cpu 2747 * @cpu: cpu to put the work task on 2748 * @work: job to be done 2749 * 2750 * This puts a job on a specific cpu 2751 */ 2752 int schedule_work_on(int cpu, struct work_struct *work) 2753 { 2754 return queue_work_on(cpu, system_wq, work); 2755 } 2756 EXPORT_SYMBOL(schedule_work_on); 2757 2758 /** 2759 * schedule_delayed_work - put work task in global workqueue after delay 2760 * @dwork: job to be done 2761 * @delay: number of jiffies to wait or 0 for immediate execution 2762 * 2763 * After waiting for a given time this puts a job in the kernel-global 2764 * workqueue. 2765 */ 2766 int schedule_delayed_work(struct delayed_work *dwork, 2767 unsigned long delay) 2768 { 2769 return queue_delayed_work(system_wq, dwork, delay); 2770 } 2771 EXPORT_SYMBOL(schedule_delayed_work); 2772 2773 /** 2774 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 2775 * @cpu: cpu to use 2776 * @dwork: job to be done 2777 * @delay: number of jiffies to wait 2778 * 2779 * After waiting for a given time this puts a job in the kernel-global 2780 * workqueue on the specified CPU. 2781 */ 2782 int schedule_delayed_work_on(int cpu, 2783 struct delayed_work *dwork, unsigned long delay) 2784 { 2785 return queue_delayed_work_on(cpu, system_wq, dwork, delay); 2786 } 2787 EXPORT_SYMBOL(schedule_delayed_work_on); 2788 2789 /** 2790 * schedule_on_each_cpu - execute a function synchronously on each online CPU 2791 * @func: the function to call 2792 * 2793 * schedule_on_each_cpu() executes @func on each online CPU using the 2794 * system workqueue and blocks until all CPUs have completed. 2795 * schedule_on_each_cpu() is very slow. 2796 * 2797 * RETURNS: 2798 * 0 on success, -errno on failure. 2799 */ 2800 int schedule_on_each_cpu(work_func_t func) 2801 { 2802 int cpu; 2803 struct work_struct __percpu *works; 2804 2805 works = alloc_percpu(struct work_struct); 2806 if (!works) 2807 return -ENOMEM; 2808 2809 get_online_cpus(); 2810 2811 for_each_online_cpu(cpu) { 2812 struct work_struct *work = per_cpu_ptr(works, cpu); 2813 2814 INIT_WORK(work, func); 2815 schedule_work_on(cpu, work); 2816 } 2817 2818 for_each_online_cpu(cpu) 2819 flush_work(per_cpu_ptr(works, cpu)); 2820 2821 put_online_cpus(); 2822 free_percpu(works); 2823 return 0; 2824 } 2825 2826 /** 2827 * flush_scheduled_work - ensure that any scheduled work has run to completion. 2828 * 2829 * Forces execution of the kernel-global workqueue and blocks until its 2830 * completion. 2831 * 2832 * Think twice before calling this function! It's very easy to get into 2833 * trouble if you don't take great care. 
Either of the following situations 2834 * will lead to deadlock: 2835 * 2836 * One of the work items currently on the workqueue needs to acquire 2837 * a lock held by your code or its caller. 2838 * 2839 * Your code is running in the context of a work routine. 2840 * 2841 * They will be detected by lockdep when they occur, but the first might not 2842 * occur very often. It depends on what work items are on the workqueue and 2843 * what locks they need, which you have no control over. 2844 * 2845 * In most situations flushing the entire workqueue is overkill; you merely 2846 * need to know that a particular work item isn't queued and isn't running. 2847 * In such cases you should use cancel_delayed_work_sync() or 2848 * cancel_work_sync() instead. 2849 */ 2850 void flush_scheduled_work(void) 2851 { 2852 flush_workqueue(system_wq); 2853 } 2854 EXPORT_SYMBOL(flush_scheduled_work); 2855 2856 /** 2857 * execute_in_process_context - reliably execute the routine with user context 2858 * @fn: the function to execute 2859 * @ew: guaranteed storage for the execute work structure (must 2860 * be available when the work executes) 2861 * 2862 * Executes the function immediately if process context is available, 2863 * otherwise schedules the function for delayed execution. 2864 * 2865 * Returns: 0 - function was executed 2866 * 1 - function was scheduled for execution 2867 */ 2868 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 2869 { 2870 if (!in_interrupt()) { 2871 fn(&ew->work); 2872 return 0; 2873 } 2874 2875 INIT_WORK(&ew->work, fn); 2876 schedule_work(&ew->work); 2877 2878 return 1; 2879 } 2880 EXPORT_SYMBOL_GPL(execute_in_process_context); 2881 2882 int keventd_up(void) 2883 { 2884 return system_wq != NULL; 2885 } 2886 2887 static int alloc_cwqs(struct workqueue_struct *wq) 2888 { 2889 /* 2890 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. 2891 * Make sure that the alignment isn't lower than that of 2892 * unsigned long long. 2893 */ 2894 const size_t size = sizeof(struct cpu_workqueue_struct); 2895 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, 2896 __alignof__(unsigned long long)); 2897 2898 if (!(wq->flags & WQ_UNBOUND)) 2899 wq->cpu_wq.pcpu = __alloc_percpu(size, align); 2900 else { 2901 void *ptr; 2902 2903 /* 2904 * Allocate enough room to align cwq and put an extra 2905 * pointer at the end pointing back to the originally 2906 * allocated pointer which will be used for free. 2907 */ 2908 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); 2909 if (ptr) { 2910 wq->cpu_wq.single = PTR_ALIGN(ptr, align); 2911 *(void **)(wq->cpu_wq.single + 1) = ptr; 2912 } 2913 } 2914 2915 /* just in case, make sure it's actually aligned */ 2916 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); 2917 return wq->cpu_wq.v ? 0 : -ENOMEM; 2918 } 2919 2920 static void free_cwqs(struct workqueue_struct *wq) 2921 { 2922 if (!(wq->flags & WQ_UNBOUND)) 2923 free_percpu(wq->cpu_wq.pcpu); 2924 else if (wq->cpu_wq.single) { 2925 /* the pointer to free is stored right after the cwq */ 2926 kfree(*(void **)(wq->cpu_wq.single + 1)); 2927 } 2928 } 2929 2930 static int wq_clamp_max_active(int max_active, unsigned int flags, 2931 const char *name) 2932 { 2933 int lim = flags & WQ_UNBOUND ? 
WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 2934 2935 if (max_active < 1 || max_active > lim) 2936 printk(KERN_WARNING "workqueue: max_active %d requested for %s " 2937 "is out of range, clamping between %d and %d\n", 2938 max_active, name, 1, lim); 2939 2940 return clamp_val(max_active, 1, lim); 2941 } 2942 2943 struct workqueue_struct *__alloc_workqueue_key(const char *fmt, 2944 unsigned int flags, 2945 int max_active, 2946 struct lock_class_key *key, 2947 const char *lock_name, ...) 2948 { 2949 va_list args, args1; 2950 struct workqueue_struct *wq; 2951 unsigned int cpu; 2952 size_t namelen; 2953 2954 /* determine namelen, allocate wq and format name */ 2955 va_start(args, lock_name); 2956 va_copy(args1, args); 2957 namelen = vsnprintf(NULL, 0, fmt, args) + 1; 2958 2959 wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL); 2960 if (!wq) 2961 goto err; 2962 2963 vsnprintf(wq->name, namelen, fmt, args1); 2964 va_end(args); 2965 va_end(args1); 2966 2967 /* 2968 * Workqueues which may be used during memory reclaim should 2969 * have a rescuer to guarantee forward progress. 2970 */ 2971 if (flags & WQ_MEM_RECLAIM) 2972 flags |= WQ_RESCUER; 2973 2974 /* 2975 * Unbound workqueues aren't concurrency managed and should be 2976 * dispatched to workers immediately. 2977 */ 2978 if (flags & WQ_UNBOUND) 2979 flags |= WQ_HIGHPRI; 2980 2981 max_active = max_active ?: WQ_DFL_ACTIVE; 2982 max_active = wq_clamp_max_active(max_active, flags, wq->name); 2983 2984 /* init wq */ 2985 wq->flags = flags; 2986 wq->saved_max_active = max_active; 2987 mutex_init(&wq->flush_mutex); 2988 atomic_set(&wq->nr_cwqs_to_flush, 0); 2989 INIT_LIST_HEAD(&wq->flusher_queue); 2990 INIT_LIST_HEAD(&wq->flusher_overflow); 2991 2992 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 2993 INIT_LIST_HEAD(&wq->list); 2994 2995 if (alloc_cwqs(wq) < 0) 2996 goto err; 2997 2998 for_each_cwq_cpu(cpu, wq) { 2999 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3000 struct global_cwq *gcwq = get_gcwq(cpu); 3001 3002 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); 3003 cwq->gcwq = gcwq; 3004 cwq->wq = wq; 3005 cwq->flush_color = -1; 3006 cwq->max_active = max_active; 3007 INIT_LIST_HEAD(&cwq->delayed_works); 3008 } 3009 3010 if (flags & WQ_RESCUER) { 3011 struct worker *rescuer; 3012 3013 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL)) 3014 goto err; 3015 3016 wq->rescuer = rescuer = alloc_worker(); 3017 if (!rescuer) 3018 goto err; 3019 3020 rescuer->task = kthread_create(rescuer_thread, wq, "%s", 3021 wq->name); 3022 if (IS_ERR(rescuer->task)) 3023 goto err; 3024 3025 rescuer->task->flags |= PF_THREAD_BOUND; 3026 wake_up_process(rescuer->task); 3027 } 3028 3029 /* 3030 * workqueue_lock protects global freeze state and workqueues 3031 * list. Grab it, set max_active accordingly and add the new 3032 * workqueue to workqueues list. 3033 */ 3034 spin_lock(&workqueue_lock); 3035 3036 if (workqueue_freezing && wq->flags & WQ_FREEZABLE) 3037 for_each_cwq_cpu(cpu, wq) 3038 get_cwq(cpu, wq)->max_active = 0; 3039 3040 list_add(&wq->list, &workqueues); 3041 3042 spin_unlock(&workqueue_lock); 3043 3044 return wq; 3045 err: 3046 if (wq) { 3047 free_cwqs(wq); 3048 free_mayday_mask(wq->mayday_mask); 3049 kfree(wq->rescuer); 3050 kfree(wq); 3051 } 3052 return NULL; 3053 } 3054 EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 3055 3056 /** 3057 * destroy_workqueue - safely terminate a workqueue 3058 * @wq: target workqueue 3059 * 3060 * Safely destroy a workqueue. All work currently pending will be done first. 
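 *
 * Rough lifecycle sketch (hypothetical caller; my_wq and my_work are
 * illustrative names only, not part of this file):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);
 *
 * destroy_workqueue() drains the queue first, so my_work above, along
 * with any chain work it queues on my_wq, has finished before the
 * workqueue itself is freed.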
3061 */ 3062 void destroy_workqueue(struct workqueue_struct *wq) 3063 { 3064 unsigned int cpu; 3065 3066 /* drain it before proceeding with destruction */ 3067 drain_workqueue(wq); 3068 3069 /* 3070 * wq list is used to freeze wq, remove from list after 3071 * flushing is complete in case freeze races us. 3072 */ 3073 spin_lock(&workqueue_lock); 3074 list_del(&wq->list); 3075 spin_unlock(&workqueue_lock); 3076 3077 /* sanity check */ 3078 for_each_cwq_cpu(cpu, wq) { 3079 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3080 int i; 3081 3082 for (i = 0; i < WORK_NR_COLORS; i++) 3083 BUG_ON(cwq->nr_in_flight[i]); 3084 BUG_ON(cwq->nr_active); 3085 BUG_ON(!list_empty(&cwq->delayed_works)); 3086 } 3087 3088 if (wq->flags & WQ_RESCUER) { 3089 kthread_stop(wq->rescuer->task); 3090 free_mayday_mask(wq->mayday_mask); 3091 kfree(wq->rescuer); 3092 } 3093 3094 free_cwqs(wq); 3095 kfree(wq); 3096 } 3097 EXPORT_SYMBOL_GPL(destroy_workqueue); 3098 3099 /** 3100 * workqueue_set_max_active - adjust max_active of a workqueue 3101 * @wq: target workqueue 3102 * @max_active: new max_active value. 3103 * 3104 * Set max_active of @wq to @max_active. 3105 * 3106 * CONTEXT: 3107 * Don't call from IRQ context. 3108 */ 3109 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 3110 { 3111 unsigned int cpu; 3112 3113 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 3114 3115 spin_lock(&workqueue_lock); 3116 3117 wq->saved_max_active = max_active; 3118 3119 for_each_cwq_cpu(cpu, wq) { 3120 struct global_cwq *gcwq = get_gcwq(cpu); 3121 3122 spin_lock_irq(&gcwq->lock); 3123 3124 if (!(wq->flags & WQ_FREEZABLE) || 3125 !(gcwq->flags & GCWQ_FREEZING)) 3126 get_cwq(gcwq->cpu, wq)->max_active = max_active; 3127 3128 spin_unlock_irq(&gcwq->lock); 3129 } 3130 3131 spin_unlock(&workqueue_lock); 3132 } 3133 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 3134 3135 /** 3136 * workqueue_congested - test whether a workqueue is congested 3137 * @cpu: CPU in question 3138 * @wq: target workqueue 3139 * 3140 * Test whether @wq's cpu workqueue for @cpu is congested. There is 3141 * no synchronization around this function and the test result is 3142 * unreliable and only useful as advisory hints or for debugging. 3143 * 3144 * RETURNS: 3145 * %true if congested, %false otherwise. 3146 */ 3147 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) 3148 { 3149 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3150 3151 return !list_empty(&cwq->delayed_works); 3152 } 3153 EXPORT_SYMBOL_GPL(workqueue_congested); 3154 3155 /** 3156 * work_cpu - return the last known associated cpu for @work 3157 * @work: the work of interest 3158 * 3159 * RETURNS: 3160 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise. 3161 */ 3162 unsigned int work_cpu(struct work_struct *work) 3163 { 3164 struct global_cwq *gcwq = get_work_gcwq(work); 3165 3166 return gcwq ? gcwq->cpu : WORK_CPU_NONE; 3167 } 3168 EXPORT_SYMBOL_GPL(work_cpu); 3169 3170 /** 3171 * work_busy - test whether a work is currently pending or running 3172 * @work: the work to be tested 3173 * 3174 * Test whether @work is currently pending or running. There is no 3175 * synchronization around this function and the test result is 3176 * unreliable and only useful as advisory hints or for debugging. 3177 * Especially for reentrant wqs, the pending state might hide the 3178 * running state. 3179 * 3180 * RETURNS: 3181 * OR'd bitmask of WORK_BUSY_* bits. 
3182 */ 3183 unsigned int work_busy(struct work_struct *work) 3184 { 3185 struct global_cwq *gcwq = get_work_gcwq(work); 3186 unsigned long flags; 3187 unsigned int ret = 0; 3188 3189 if (!gcwq) 3190 return false; 3191 3192 spin_lock_irqsave(&gcwq->lock, flags); 3193 3194 if (work_pending(work)) 3195 ret |= WORK_BUSY_PENDING; 3196 if (find_worker_executing_work(gcwq, work)) 3197 ret |= WORK_BUSY_RUNNING; 3198 3199 spin_unlock_irqrestore(&gcwq->lock, flags); 3200 3201 return ret; 3202 } 3203 EXPORT_SYMBOL_GPL(work_busy); 3204 3205 /* 3206 * CPU hotplug. 3207 * 3208 * There are two challenges in supporting CPU hotplug. Firstly, there 3209 * are a lot of assumptions on strong associations among work, cwq and 3210 * gcwq which make migrating pending and scheduled works very 3211 * difficult to implement without impacting hot paths. Secondly, 3212 * gcwqs serve mix of short, long and very long running works making 3213 * blocked draining impractical. 3214 * 3215 * This is solved by allowing a gcwq to be detached from CPU, running 3216 * it with unbound (rogue) workers and allowing it to be reattached 3217 * later if the cpu comes back online. A separate thread is created 3218 * to govern a gcwq in such state and is called the trustee of the 3219 * gcwq. 3220 * 3221 * Trustee states and their descriptions. 3222 * 3223 * START Command state used on startup. On CPU_DOWN_PREPARE, a 3224 * new trustee is started with this state. 3225 * 3226 * IN_CHARGE Once started, trustee will enter this state after 3227 * assuming the manager role and making all existing 3228 * workers rogue. DOWN_PREPARE waits for trustee to 3229 * enter this state. After reaching IN_CHARGE, trustee 3230 * tries to execute the pending worklist until it's empty 3231 * and the state is set to BUTCHER, or the state is set 3232 * to RELEASE. 3233 * 3234 * BUTCHER Command state which is set by the cpu callback after 3235 * the cpu has went down. Once this state is set trustee 3236 * knows that there will be no new works on the worklist 3237 * and once the worklist is empty it can proceed to 3238 * killing idle workers. 3239 * 3240 * RELEASE Command state which is set by the cpu callback if the 3241 * cpu down has been canceled or it has come online 3242 * again. After recognizing this state, trustee stops 3243 * trying to drain or butcher and clears ROGUE, rebinds 3244 * all remaining workers back to the cpu and releases 3245 * manager role. 3246 * 3247 * DONE Trustee will enter this state after BUTCHER or RELEASE 3248 * is complete. 3249 * 3250 * trustee CPU draining 3251 * took over down complete 3252 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE 3253 * | | ^ 3254 * | CPU is back online v return workers | 3255 * ----------------> RELEASE -------------- 3256 */ 3257 3258 /** 3259 * trustee_wait_event_timeout - timed event wait for trustee 3260 * @cond: condition to wait for 3261 * @timeout: timeout in jiffies 3262 * 3263 * wait_event_timeout() for trustee to use. Handles locking and 3264 * checks for RELEASE request. 3265 * 3266 * CONTEXT: 3267 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 3268 * multiple times. To be used by trustee. 3269 * 3270 * RETURNS: 3271 * Positive indicating left time if @cond is satisfied, 0 if timed 3272 * out, -1 if canceled. 
3273 */ 3274 #define trustee_wait_event_timeout(cond, timeout) ({ \ 3275 long __ret = (timeout); \ 3276 while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \ 3277 __ret) { \ 3278 spin_unlock_irq(&gcwq->lock); \ 3279 __wait_event_timeout(gcwq->trustee_wait, (cond) || \ 3280 (gcwq->trustee_state == TRUSTEE_RELEASE), \ 3281 __ret); \ 3282 spin_lock_irq(&gcwq->lock); \ 3283 } \ 3284 gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \ 3285 }) 3286 3287 /** 3288 * trustee_wait_event - event wait for trustee 3289 * @cond: condition to wait for 3290 * 3291 * wait_event() for trustee to use. Automatically handles locking and 3292 * checks for CANCEL request. 3293 * 3294 * CONTEXT: 3295 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 3296 * multiple times. To be used by trustee. 3297 * 3298 * RETURNS: 3299 * 0 if @cond is satisfied, -1 if canceled. 3300 */ 3301 #define trustee_wait_event(cond) ({ \ 3302 long __ret1; \ 3303 __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\ 3304 __ret1 < 0 ? -1 : 0; \ 3305 }) 3306 3307 static int __cpuinit trustee_thread(void *__gcwq) 3308 { 3309 struct global_cwq *gcwq = __gcwq; 3310 struct worker *worker; 3311 struct work_struct *work; 3312 struct hlist_node *pos; 3313 long rc; 3314 int i; 3315 3316 BUG_ON(gcwq->cpu != smp_processor_id()); 3317 3318 spin_lock_irq(&gcwq->lock); 3319 /* 3320 * Claim the manager position and make all workers rogue. 3321 * Trustee must be bound to the target cpu and can't be 3322 * cancelled. 3323 */ 3324 BUG_ON(gcwq->cpu != smp_processor_id()); 3325 rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS)); 3326 BUG_ON(rc < 0); 3327 3328 gcwq->flags |= GCWQ_MANAGING_WORKERS; 3329 3330 list_for_each_entry(worker, &gcwq->idle_list, entry) 3331 worker->flags |= WORKER_ROGUE; 3332 3333 for_each_busy_worker(worker, i, pos, gcwq) 3334 worker->flags |= WORKER_ROGUE; 3335 3336 /* 3337 * Call schedule() so that we cross rq->lock and thus can 3338 * guarantee sched callbacks see the rogue flag. This is 3339 * necessary as scheduler callbacks may be invoked from other 3340 * cpus. 3341 */ 3342 spin_unlock_irq(&gcwq->lock); 3343 schedule(); 3344 spin_lock_irq(&gcwq->lock); 3345 3346 /* 3347 * Sched callbacks are disabled now. Zap nr_running. After 3348 * this, nr_running stays zero and need_more_worker() and 3349 * keep_working() are always true as long as the worklist is 3350 * not empty. 3351 */ 3352 atomic_set(get_gcwq_nr_running(gcwq->cpu), 0); 3353 3354 spin_unlock_irq(&gcwq->lock); 3355 del_timer_sync(&gcwq->idle_timer); 3356 spin_lock_irq(&gcwq->lock); 3357 3358 /* 3359 * We're now in charge. Notify and proceed to drain. We need 3360 * to keep the gcwq running during the whole CPU down 3361 * procedure as other cpu hotunplug callbacks may need to 3362 * flush currently running tasks. 3363 */ 3364 gcwq->trustee_state = TRUSTEE_IN_CHARGE; 3365 wake_up_all(&gcwq->trustee_wait); 3366 3367 /* 3368 * The original cpu is in the process of dying and may go away 3369 * anytime now. When that happens, we and all workers would 3370 * be migrated to other cpus. Try draining any left work. We 3371 * want to get it over with ASAP - spam rescuers, wake up as 3372 * many idlers as necessary and create new ones till the 3373 * worklist is empty. Note that if the gcwq is frozen, there 3374 * may be frozen works in freezable cwqs. Don't declare 3375 * completion while frozen. 
3376 */ 3377 while (gcwq->nr_workers != gcwq->nr_idle || 3378 gcwq->flags & GCWQ_FREEZING || 3379 gcwq->trustee_state == TRUSTEE_IN_CHARGE) { 3380 int nr_works = 0; 3381 3382 list_for_each_entry(work, &gcwq->worklist, entry) { 3383 send_mayday(work); 3384 nr_works++; 3385 } 3386 3387 list_for_each_entry(worker, &gcwq->idle_list, entry) { 3388 if (!nr_works--) 3389 break; 3390 wake_up_process(worker->task); 3391 } 3392 3393 if (need_to_create_worker(gcwq)) { 3394 spin_unlock_irq(&gcwq->lock); 3395 worker = create_worker(gcwq, false); 3396 spin_lock_irq(&gcwq->lock); 3397 if (worker) { 3398 worker->flags |= WORKER_ROGUE; 3399 start_worker(worker); 3400 } 3401 } 3402 3403 /* give a breather */ 3404 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) 3405 break; 3406 } 3407 3408 /* 3409 * Either all works have been scheduled and cpu is down, or 3410 * cpu down has already been canceled. Wait for and butcher 3411 * all workers till we're canceled. 3412 */ 3413 do { 3414 rc = trustee_wait_event(!list_empty(&gcwq->idle_list)); 3415 while (!list_empty(&gcwq->idle_list)) 3416 destroy_worker(list_first_entry(&gcwq->idle_list, 3417 struct worker, entry)); 3418 } while (gcwq->nr_workers && rc >= 0); 3419 3420 /* 3421 * At this point, either draining has completed and no worker 3422 * is left, or cpu down has been canceled or the cpu is being 3423 * brought back up. There shouldn't be any idle one left. 3424 * Tell the remaining busy ones to rebind once it finishes the 3425 * currently scheduled works by scheduling the rebind_work. 3426 */ 3427 WARN_ON(!list_empty(&gcwq->idle_list)); 3428 3429 for_each_busy_worker(worker, i, pos, gcwq) { 3430 struct work_struct *rebind_work = &worker->rebind_work; 3431 3432 /* 3433 * Rebind_work may race with future cpu hotplug 3434 * operations. Use a separate flag to mark that 3435 * rebinding is scheduled. 3436 */ 3437 worker->flags |= WORKER_REBIND; 3438 worker->flags &= ~WORKER_ROGUE; 3439 3440 /* queue rebind_work, wq doesn't matter, use the default one */ 3441 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, 3442 work_data_bits(rebind_work))) 3443 continue; 3444 3445 debug_work_activate(rebind_work); 3446 insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work, 3447 worker->scheduled.next, 3448 work_color_to_flags(WORK_NO_COLOR)); 3449 } 3450 3451 /* relinquish manager role */ 3452 gcwq->flags &= ~GCWQ_MANAGING_WORKERS; 3453 3454 /* notify completion */ 3455 gcwq->trustee = NULL; 3456 gcwq->trustee_state = TRUSTEE_DONE; 3457 wake_up_all(&gcwq->trustee_wait); 3458 spin_unlock_irq(&gcwq->lock); 3459 return 0; 3460 } 3461 3462 /** 3463 * wait_trustee_state - wait for trustee to enter the specified state 3464 * @gcwq: gcwq the trustee of interest belongs to 3465 * @state: target state to wait for 3466 * 3467 * Wait for the trustee to reach @state. DONE is already matched. 3468 * 3469 * CONTEXT: 3470 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 3471 * multiple times. To be used by cpu_callback. 
3472 */ 3473 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) 3474 __releases(&gcwq->lock) 3475 __acquires(&gcwq->lock) 3476 { 3477 if (!(gcwq->trustee_state == state || 3478 gcwq->trustee_state == TRUSTEE_DONE)) { 3479 spin_unlock_irq(&gcwq->lock); 3480 __wait_event(gcwq->trustee_wait, 3481 gcwq->trustee_state == state || 3482 gcwq->trustee_state == TRUSTEE_DONE); 3483 spin_lock_irq(&gcwq->lock); 3484 } 3485 } 3486 3487 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, 3488 unsigned long action, 3489 void *hcpu) 3490 { 3491 unsigned int cpu = (unsigned long)hcpu; 3492 struct global_cwq *gcwq = get_gcwq(cpu); 3493 struct task_struct *new_trustee = NULL; 3494 struct worker *uninitialized_var(new_worker); 3495 unsigned long flags; 3496 3497 action &= ~CPU_TASKS_FROZEN; 3498 3499 switch (action) { 3500 case CPU_DOWN_PREPARE: 3501 new_trustee = kthread_create(trustee_thread, gcwq, 3502 "workqueue_trustee/%d\n", cpu); 3503 if (IS_ERR(new_trustee)) 3504 return notifier_from_errno(PTR_ERR(new_trustee)); 3505 kthread_bind(new_trustee, cpu); 3506 /* fall through */ 3507 case CPU_UP_PREPARE: 3508 BUG_ON(gcwq->first_idle); 3509 new_worker = create_worker(gcwq, false); 3510 if (!new_worker) { 3511 if (new_trustee) 3512 kthread_stop(new_trustee); 3513 return NOTIFY_BAD; 3514 } 3515 } 3516 3517 /* some are called w/ irq disabled, don't disturb irq status */ 3518 spin_lock_irqsave(&gcwq->lock, flags); 3519 3520 switch (action) { 3521 case CPU_DOWN_PREPARE: 3522 /* initialize trustee and tell it to acquire the gcwq */ 3523 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE); 3524 gcwq->trustee = new_trustee; 3525 gcwq->trustee_state = TRUSTEE_START; 3526 wake_up_process(gcwq->trustee); 3527 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); 3528 /* fall through */ 3529 case CPU_UP_PREPARE: 3530 BUG_ON(gcwq->first_idle); 3531 gcwq->first_idle = new_worker; 3532 break; 3533 3534 case CPU_DYING: 3535 /* 3536 * Before this, the trustee and all workers except for 3537 * the ones which are still executing works from 3538 * before the last CPU down must be on the cpu. After 3539 * this, they'll all be diasporas. 3540 */ 3541 gcwq->flags |= GCWQ_DISASSOCIATED; 3542 break; 3543 3544 case CPU_POST_DEAD: 3545 gcwq->trustee_state = TRUSTEE_BUTCHER; 3546 /* fall through */ 3547 case CPU_UP_CANCELED: 3548 destroy_worker(gcwq->first_idle); 3549 gcwq->first_idle = NULL; 3550 break; 3551 3552 case CPU_DOWN_FAILED: 3553 case CPU_ONLINE: 3554 gcwq->flags &= ~GCWQ_DISASSOCIATED; 3555 if (gcwq->trustee_state != TRUSTEE_DONE) { 3556 gcwq->trustee_state = TRUSTEE_RELEASE; 3557 wake_up_process(gcwq->trustee); 3558 wait_trustee_state(gcwq, TRUSTEE_DONE); 3559 } 3560 3561 /* 3562 * Trustee is done and there might be no worker left. 3563 * Put the first_idle in and request a real manager to 3564 * take a look. 
3565 */ 3566 spin_unlock_irq(&gcwq->lock); 3567 kthread_bind(gcwq->first_idle->task, cpu); 3568 spin_lock_irq(&gcwq->lock); 3569 gcwq->flags |= GCWQ_MANAGE_WORKERS; 3570 start_worker(gcwq->first_idle); 3571 gcwq->first_idle = NULL; 3572 break; 3573 } 3574 3575 spin_unlock_irqrestore(&gcwq->lock, flags); 3576 3577 return notifier_from_errno(0); 3578 } 3579 3580 #ifdef CONFIG_SMP 3581 3582 struct work_for_cpu { 3583 struct completion completion; 3584 long (*fn)(void *); 3585 void *arg; 3586 long ret; 3587 }; 3588 3589 static int do_work_for_cpu(void *_wfc) 3590 { 3591 struct work_for_cpu *wfc = _wfc; 3592 wfc->ret = wfc->fn(wfc->arg); 3593 complete(&wfc->completion); 3594 return 0; 3595 } 3596 3597 /** 3598 * work_on_cpu - run a function in user context on a particular cpu 3599 * @cpu: the cpu to run on 3600 * @fn: the function to run 3601 * @arg: the function arg 3602 * 3603 * This will return the value @fn returns. 3604 * It is up to the caller to ensure that the cpu doesn't go offline. 3605 * The caller must not hold any locks which would prevent @fn from completing. 3606 */ 3607 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 3608 { 3609 struct task_struct *sub_thread; 3610 struct work_for_cpu wfc = { 3611 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), 3612 .fn = fn, 3613 .arg = arg, 3614 }; 3615 3616 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); 3617 if (IS_ERR(sub_thread)) 3618 return PTR_ERR(sub_thread); 3619 kthread_bind(sub_thread, cpu); 3620 wake_up_process(sub_thread); 3621 wait_for_completion(&wfc.completion); 3622 return wfc.ret; 3623 } 3624 EXPORT_SYMBOL_GPL(work_on_cpu); 3625 #endif /* CONFIG_SMP */ 3626 3627 #ifdef CONFIG_FREEZER 3628 3629 /** 3630 * freeze_workqueues_begin - begin freezing workqueues 3631 * 3632 * Start freezing workqueues. After this function returns, all freezable 3633 * workqueues will queue new works to their frozen_works list instead of 3634 * gcwq->worklist. 3635 * 3636 * CONTEXT: 3637 * Grabs and releases workqueue_lock and gcwq->lock's. 3638 */ 3639 void freeze_workqueues_begin(void) 3640 { 3641 unsigned int cpu; 3642 3643 spin_lock(&workqueue_lock); 3644 3645 BUG_ON(workqueue_freezing); 3646 workqueue_freezing = true; 3647 3648 for_each_gcwq_cpu(cpu) { 3649 struct global_cwq *gcwq = get_gcwq(cpu); 3650 struct workqueue_struct *wq; 3651 3652 spin_lock_irq(&gcwq->lock); 3653 3654 BUG_ON(gcwq->flags & GCWQ_FREEZING); 3655 gcwq->flags |= GCWQ_FREEZING; 3656 3657 list_for_each_entry(wq, &workqueues, list) { 3658 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3659 3660 if (cwq && wq->flags & WQ_FREEZABLE) 3661 cwq->max_active = 0; 3662 } 3663 3664 spin_unlock_irq(&gcwq->lock); 3665 } 3666 3667 spin_unlock(&workqueue_lock); 3668 } 3669 3670 /** 3671 * freeze_workqueues_busy - are freezable workqueues still busy? 3672 * 3673 * Check whether freezing is complete. This function must be called 3674 * between freeze_workqueues_begin() and thaw_workqueues(). 3675 * 3676 * CONTEXT: 3677 * Grabs and releases workqueue_lock. 3678 * 3679 * RETURNS: 3680 * %true if some freezable workqueues are still busy. %false if freezing 3681 * is complete. 3682 */ 3683 bool freeze_workqueues_busy(void) 3684 { 3685 unsigned int cpu; 3686 bool busy = false; 3687 3688 spin_lock(&workqueue_lock); 3689 3690 BUG_ON(!workqueue_freezing); 3691 3692 for_each_gcwq_cpu(cpu) { 3693 struct workqueue_struct *wq; 3694 /* 3695 * nr_active is monotonically decreasing. It's safe 3696 * to peek without lock. 
3697 */ 3698 list_for_each_entry(wq, &workqueues, list) { 3699 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3700 3701 if (!cwq || !(wq->flags & WQ_FREEZABLE)) 3702 continue; 3703 3704 BUG_ON(cwq->nr_active < 0); 3705 if (cwq->nr_active) { 3706 busy = true; 3707 goto out_unlock; 3708 } 3709 } 3710 } 3711 out_unlock: 3712 spin_unlock(&workqueue_lock); 3713 return busy; 3714 } 3715 3716 /** 3717 * thaw_workqueues - thaw workqueues 3718 * 3719 * Thaw workqueues. Normal queueing is restored and all collected 3720 * frozen works are transferred to their respective gcwq worklists. 3721 * 3722 * CONTEXT: 3723 * Grabs and releases workqueue_lock and gcwq->lock's. 3724 */ 3725 void thaw_workqueues(void) 3726 { 3727 unsigned int cpu; 3728 3729 spin_lock(&workqueue_lock); 3730 3731 if (!workqueue_freezing) 3732 goto out_unlock; 3733 3734 for_each_gcwq_cpu(cpu) { 3735 struct global_cwq *gcwq = get_gcwq(cpu); 3736 struct workqueue_struct *wq; 3737 3738 spin_lock_irq(&gcwq->lock); 3739 3740 BUG_ON(!(gcwq->flags & GCWQ_FREEZING)); 3741 gcwq->flags &= ~GCWQ_FREEZING; 3742 3743 list_for_each_entry(wq, &workqueues, list) { 3744 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3745 3746 if (!cwq || !(wq->flags & WQ_FREEZABLE)) 3747 continue; 3748 3749 /* restore max_active and repopulate worklist */ 3750 cwq->max_active = wq->saved_max_active; 3751 3752 while (!list_empty(&cwq->delayed_works) && 3753 cwq->nr_active < cwq->max_active) 3754 cwq_activate_first_delayed(cwq); 3755 } 3756 3757 wake_up_worker(gcwq); 3758 3759 spin_unlock_irq(&gcwq->lock); 3760 } 3761 3762 workqueue_freezing = false; 3763 out_unlock: 3764 spin_unlock(&workqueue_lock); 3765 } 3766 #endif /* CONFIG_FREEZER */ 3767 3768 static int __init init_workqueues(void) 3769 { 3770 unsigned int cpu; 3771 int i; 3772 3773 cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); 3774 3775 /* initialize gcwqs */ 3776 for_each_gcwq_cpu(cpu) { 3777 struct global_cwq *gcwq = get_gcwq(cpu); 3778 3779 spin_lock_init(&gcwq->lock); 3780 INIT_LIST_HEAD(&gcwq->worklist); 3781 gcwq->cpu = cpu; 3782 gcwq->flags |= GCWQ_DISASSOCIATED; 3783 3784 INIT_LIST_HEAD(&gcwq->idle_list); 3785 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) 3786 INIT_HLIST_HEAD(&gcwq->busy_hash[i]); 3787 3788 init_timer_deferrable(&gcwq->idle_timer); 3789 gcwq->idle_timer.function = idle_worker_timeout; 3790 gcwq->idle_timer.data = (unsigned long)gcwq; 3791 3792 setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout, 3793 (unsigned long)gcwq); 3794 3795 ida_init(&gcwq->worker_ida); 3796 3797 gcwq->trustee_state = TRUSTEE_DONE; 3798 init_waitqueue_head(&gcwq->trustee_wait); 3799 } 3800 3801 /* create the initial worker */ 3802 for_each_online_gcwq_cpu(cpu) { 3803 struct global_cwq *gcwq = get_gcwq(cpu); 3804 struct worker *worker; 3805 3806 if (cpu != WORK_CPU_UNBOUND) 3807 gcwq->flags &= ~GCWQ_DISASSOCIATED; 3808 worker = create_worker(gcwq, true); 3809 BUG_ON(!worker); 3810 spin_lock_irq(&gcwq->lock); 3811 start_worker(worker); 3812 spin_unlock_irq(&gcwq->lock); 3813 } 3814 3815 system_wq = alloc_workqueue("events", 0, 0); 3816 system_long_wq = alloc_workqueue("events_long", 0, 0); 3817 system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); 3818 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 3819 WQ_UNBOUND_MAX_ACTIVE); 3820 system_freezable_wq = alloc_workqueue("events_freezable", 3821 WQ_FREEZABLE, 0); 3822 system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable", 3823 WQ_NON_REENTRANT | WQ_FREEZABLE, 0); 3824 BUG_ON(!system_wq || 
!system_long_wq || !system_nrt_wq || 3825 !system_unbound_wq || !system_freezable_wq || 3826 !system_nrt_freezable_wq); 3827 return 0; 3828 } 3829 early_initcall(init_workqueues); 3830