/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002	Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010	SUSE Linux Products GmbH
 * Copyright (C) 2010	Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"

enum {
	/*
	 * global_cwq flags
	 *
	 * A bound gcwq is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The gcwq behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED can be flipped only while holding
	 * assoc_mutex of all pools on the gcwq to avoid changing binding
	 * state while create_worker() is in progress.
	 */
	GCWQ_DISASSOCIATED	= 1 << 0,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 1,	/* freeze in progress */

	/* pool flags */
	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	POOL_MANAGING_WORKERS	= 1 << 1,	/* managing workers */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_UNBOUND |
				  WORKER_CPU_INTENSIVE,

	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give -20.
100 */ 101 RESCUER_NICE_LEVEL = -20, 102 HIGHPRI_NICE_LEVEL = -20, 103 }; 104 105 /* 106 * Structure fields follow one of the following exclusion rules. 107 * 108 * I: Modifiable by initialization/destruction paths and read-only for 109 * everyone else. 110 * 111 * P: Preemption protected. Disabling preemption is enough and should 112 * only be modified and accessed from the local cpu. 113 * 114 * L: gcwq->lock protected. Access with gcwq->lock held. 115 * 116 * X: During normal operation, modification requires gcwq->lock and 117 * should be done only from local cpu. Either disabling preemption 118 * on local cpu or grabbing gcwq->lock is enough for read access. 119 * If GCWQ_DISASSOCIATED is set, it's identical to L. 120 * 121 * F: wq->flush_mutex protected. 122 * 123 * W: workqueue_lock protected. 124 */ 125 126 struct global_cwq; 127 struct worker_pool; 128 129 /* 130 * The poor guys doing the actual heavy lifting. All on-duty workers 131 * are either serving the manager role, on idle list or on busy hash. 132 */ 133 struct worker { 134 /* on idle list while idle, on busy hash table while busy */ 135 union { 136 struct list_head entry; /* L: while idle */ 137 struct hlist_node hentry; /* L: while busy */ 138 }; 139 140 struct work_struct *current_work; /* L: work being processed */ 141 struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */ 142 struct list_head scheduled; /* L: scheduled works */ 143 struct task_struct *task; /* I: worker task */ 144 struct worker_pool *pool; /* I: the associated pool */ 145 /* 64 bytes boundary on 64bit, 32 on 32bit */ 146 unsigned long last_active; /* L: last active timestamp */ 147 unsigned int flags; /* X: flags */ 148 int id; /* I: worker id */ 149 150 /* for rebinding worker to CPU */ 151 struct work_struct rebind_work; /* L: for busy worker */ 152 }; 153 154 struct worker_pool { 155 struct global_cwq *gcwq; /* I: the owning gcwq */ 156 unsigned int flags; /* X: flags */ 157 158 struct list_head worklist; /* L: list of pending works */ 159 int nr_workers; /* L: total number of workers */ 160 161 /* nr_idle includes the ones off idle_list for rebinding */ 162 int nr_idle; /* L: currently idle ones */ 163 164 struct list_head idle_list; /* X: list of idle workers */ 165 struct timer_list idle_timer; /* L: worker idle timeout */ 166 struct timer_list mayday_timer; /* L: SOS timer for workers */ 167 168 struct mutex assoc_mutex; /* protect GCWQ_DISASSOCIATED */ 169 struct ida worker_ida; /* L: for worker IDs */ 170 }; 171 172 /* 173 * Global per-cpu workqueue. There's one and only one for each cpu 174 * and all works are queued and processed here regardless of their 175 * target workqueues. 176 */ 177 struct global_cwq { 178 spinlock_t lock; /* the gcwq lock */ 179 unsigned int cpu; /* I: the associated cpu */ 180 unsigned int flags; /* L: GCWQ_* flags */ 181 182 /* workers are chained either in busy_hash or pool idle_list */ 183 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE]; 184 /* L: hash of busy workers */ 185 186 struct worker_pool pools[NR_WORKER_POOLS]; 187 /* normal and highpri pools */ 188 } ____cacheline_aligned_in_smp; 189 190 /* 191 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of 192 * work_struct->data are used for flags and thus cwqs need to be 193 * aligned at two's power of the number of flag bits. 
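 *
 * A rough sketch of why that alignment matters, based on set_work_cwq()
 * and get_work_cwq() below (illustrative only, not additional code in
 * this file): while a work item is queued, its data word packs the cwq
 * pointer and the flag bits together,
 *
 *	atomic_long_set(&work->data, (unsigned long)cwq | WORK_STRUCT_CWQ |
 *		        WORK_STRUCT_PENDING | extra_flags);
 *	cwq = (void *)(atomic_long_read(&work->data) &
 *		       WORK_STRUCT_WQ_DATA_MASK);
 *
 * so the low WORK_STRUCT_FLAG_BITS of every cwq address must be zero.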
194 */ 195 struct cpu_workqueue_struct { 196 struct worker_pool *pool; /* I: the associated pool */ 197 struct workqueue_struct *wq; /* I: the owning workqueue */ 198 int work_color; /* L: current color */ 199 int flush_color; /* L: flushing color */ 200 int nr_in_flight[WORK_NR_COLORS]; 201 /* L: nr of in_flight works */ 202 int nr_active; /* L: nr of active works */ 203 int max_active; /* L: max active works */ 204 struct list_head delayed_works; /* L: delayed works */ 205 }; 206 207 /* 208 * Structure used to wait for workqueue flush. 209 */ 210 struct wq_flusher { 211 struct list_head list; /* F: list of flushers */ 212 int flush_color; /* F: flush color waiting for */ 213 struct completion done; /* flush completion */ 214 }; 215 216 /* 217 * All cpumasks are assumed to be always set on UP and thus can't be 218 * used to determine whether there's something to be done. 219 */ 220 #ifdef CONFIG_SMP 221 typedef cpumask_var_t mayday_mask_t; 222 #define mayday_test_and_set_cpu(cpu, mask) \ 223 cpumask_test_and_set_cpu((cpu), (mask)) 224 #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) 225 #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) 226 #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp)) 227 #define free_mayday_mask(mask) free_cpumask_var((mask)) 228 #else 229 typedef unsigned long mayday_mask_t; 230 #define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask)) 231 #define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask)) 232 #define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask)) 233 #define alloc_mayday_mask(maskp, gfp) true 234 #define free_mayday_mask(mask) do { } while (0) 235 #endif 236 237 /* 238 * The externally visible workqueue abstraction is an array of 239 * per-CPU workqueues: 240 */ 241 struct workqueue_struct { 242 unsigned int flags; /* W: WQ_* flags */ 243 union { 244 struct cpu_workqueue_struct __percpu *pcpu; 245 struct cpu_workqueue_struct *single; 246 unsigned long v; 247 } cpu_wq; /* I: cwq's */ 248 struct list_head list; /* W: list of all workqueues */ 249 250 struct mutex flush_mutex; /* protects wq flushing */ 251 int work_color; /* F: current work color */ 252 int flush_color; /* F: current flush color */ 253 atomic_t nr_cwqs_to_flush; /* flush in progress */ 254 struct wq_flusher *first_flusher; /* F: first flusher */ 255 struct list_head flusher_queue; /* F: flush waiters */ 256 struct list_head flusher_overflow; /* F: flush overflow list */ 257 258 mayday_mask_t mayday_mask; /* cpus requesting rescue */ 259 struct worker *rescuer; /* I: rescue worker */ 260 261 int nr_drainers; /* W: drain in progress */ 262 int saved_max_active; /* W: saved cwq max_active */ 263 #ifdef CONFIG_LOCKDEP 264 struct lockdep_map lockdep_map; 265 #endif 266 char name[]; /* I: workqueue name */ 267 }; 268 269 struct workqueue_struct *system_wq __read_mostly; 270 EXPORT_SYMBOL_GPL(system_wq); 271 struct workqueue_struct *system_highpri_wq __read_mostly; 272 EXPORT_SYMBOL_GPL(system_highpri_wq); 273 struct workqueue_struct *system_long_wq __read_mostly; 274 EXPORT_SYMBOL_GPL(system_long_wq); 275 struct workqueue_struct *system_unbound_wq __read_mostly; 276 EXPORT_SYMBOL_GPL(system_unbound_wq); 277 struct workqueue_struct *system_freezable_wq __read_mostly; 278 EXPORT_SYMBOL_GPL(system_freezable_wq); 279 280 #define CREATE_TRACE_POINTS 281 #include <trace/events/workqueue.h> 282 283 #define for_each_worker_pool(pool, gcwq) \ 284 for ((pool) = &(gcwq)->pools[0]; \ 285 (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; 
(pool)++) 286 287 #define for_each_busy_worker(worker, i, pos, gcwq) \ 288 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ 289 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) 290 291 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask, 292 unsigned int sw) 293 { 294 if (cpu < nr_cpu_ids) { 295 if (sw & 1) { 296 cpu = cpumask_next(cpu, mask); 297 if (cpu < nr_cpu_ids) 298 return cpu; 299 } 300 if (sw & 2) 301 return WORK_CPU_UNBOUND; 302 } 303 return WORK_CPU_NONE; 304 } 305 306 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, 307 struct workqueue_struct *wq) 308 { 309 return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); 310 } 311 312 /* 313 * CPU iterators 314 * 315 * An extra gcwq is defined for an invalid cpu number 316 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any 317 * specific CPU. The following iterators are similar to 318 * for_each_*_cpu() iterators but also considers the unbound gcwq. 319 * 320 * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND 321 * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND 322 * for_each_cwq_cpu() : possible CPUs for bound workqueues, 323 * WORK_CPU_UNBOUND for unbound workqueues 324 */ 325 #define for_each_gcwq_cpu(cpu) \ 326 for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \ 327 (cpu) < WORK_CPU_NONE; \ 328 (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3)) 329 330 #define for_each_online_gcwq_cpu(cpu) \ 331 for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \ 332 (cpu) < WORK_CPU_NONE; \ 333 (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3)) 334 335 #define for_each_cwq_cpu(cpu, wq) \ 336 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \ 337 (cpu) < WORK_CPU_NONE; \ 338 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) 339 340 #ifdef CONFIG_DEBUG_OBJECTS_WORK 341 342 static struct debug_obj_descr work_debug_descr; 343 344 static void *work_debug_hint(void *addr) 345 { 346 return ((struct work_struct *) addr)->func; 347 } 348 349 /* 350 * fixup_init is called when: 351 * - an active object is initialized 352 */ 353 static int work_fixup_init(void *addr, enum debug_obj_state state) 354 { 355 struct work_struct *work = addr; 356 357 switch (state) { 358 case ODEBUG_STATE_ACTIVE: 359 cancel_work_sync(work); 360 debug_object_init(work, &work_debug_descr); 361 return 1; 362 default: 363 return 0; 364 } 365 } 366 367 /* 368 * fixup_activate is called when: 369 * - an active object is activated 370 * - an unknown object is activated (might be a statically initialized object) 371 */ 372 static int work_fixup_activate(void *addr, enum debug_obj_state state) 373 { 374 struct work_struct *work = addr; 375 376 switch (state) { 377 378 case ODEBUG_STATE_NOTAVAILABLE: 379 /* 380 * This is not really a fixup. The work struct was 381 * statically initialized. We just make sure that it 382 * is tracked in the object tracker. 
383 */ 384 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { 385 debug_object_init(work, &work_debug_descr); 386 debug_object_activate(work, &work_debug_descr); 387 return 0; 388 } 389 WARN_ON_ONCE(1); 390 return 0; 391 392 case ODEBUG_STATE_ACTIVE: 393 WARN_ON(1); 394 395 default: 396 return 0; 397 } 398 } 399 400 /* 401 * fixup_free is called when: 402 * - an active object is freed 403 */ 404 static int work_fixup_free(void *addr, enum debug_obj_state state) 405 { 406 struct work_struct *work = addr; 407 408 switch (state) { 409 case ODEBUG_STATE_ACTIVE: 410 cancel_work_sync(work); 411 debug_object_free(work, &work_debug_descr); 412 return 1; 413 default: 414 return 0; 415 } 416 } 417 418 static struct debug_obj_descr work_debug_descr = { 419 .name = "work_struct", 420 .debug_hint = work_debug_hint, 421 .fixup_init = work_fixup_init, 422 .fixup_activate = work_fixup_activate, 423 .fixup_free = work_fixup_free, 424 }; 425 426 static inline void debug_work_activate(struct work_struct *work) 427 { 428 debug_object_activate(work, &work_debug_descr); 429 } 430 431 static inline void debug_work_deactivate(struct work_struct *work) 432 { 433 debug_object_deactivate(work, &work_debug_descr); 434 } 435 436 void __init_work(struct work_struct *work, int onstack) 437 { 438 if (onstack) 439 debug_object_init_on_stack(work, &work_debug_descr); 440 else 441 debug_object_init(work, &work_debug_descr); 442 } 443 EXPORT_SYMBOL_GPL(__init_work); 444 445 void destroy_work_on_stack(struct work_struct *work) 446 { 447 debug_object_free(work, &work_debug_descr); 448 } 449 EXPORT_SYMBOL_GPL(destroy_work_on_stack); 450 451 #else 452 static inline void debug_work_activate(struct work_struct *work) { } 453 static inline void debug_work_deactivate(struct work_struct *work) { } 454 #endif 455 456 /* Serializes the accesses to the list of workqueues. */ 457 static DEFINE_SPINLOCK(workqueue_lock); 458 static LIST_HEAD(workqueues); 459 static bool workqueue_freezing; /* W: have wqs started freezing? */ 460 461 /* 462 * The almighty global cpu workqueues. nr_running is the only field 463 * which is expected to be used frequently by other cpus via 464 * try_to_wake_up(). Put it in a separate cacheline. 465 */ 466 static DEFINE_PER_CPU(struct global_cwq, global_cwq); 467 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]); 468 469 /* 470 * Global cpu workqueue and nr_running counter for unbound gcwq. The 471 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its 472 * workers have WORKER_UNBOUND set. 473 */ 474 static struct global_cwq unbound_global_cwq; 475 static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = { 476 [0 ... 
NR_WORKER_POOLS - 1] = ATOMIC_INIT(0), /* always 0 */ 477 }; 478 479 static int worker_thread(void *__worker); 480 481 static int worker_pool_pri(struct worker_pool *pool) 482 { 483 return pool - pool->gcwq->pools; 484 } 485 486 static struct global_cwq *get_gcwq(unsigned int cpu) 487 { 488 if (cpu != WORK_CPU_UNBOUND) 489 return &per_cpu(global_cwq, cpu); 490 else 491 return &unbound_global_cwq; 492 } 493 494 static atomic_t *get_pool_nr_running(struct worker_pool *pool) 495 { 496 int cpu = pool->gcwq->cpu; 497 int idx = worker_pool_pri(pool); 498 499 if (cpu != WORK_CPU_UNBOUND) 500 return &per_cpu(pool_nr_running, cpu)[idx]; 501 else 502 return &unbound_pool_nr_running[idx]; 503 } 504 505 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, 506 struct workqueue_struct *wq) 507 { 508 if (!(wq->flags & WQ_UNBOUND)) { 509 if (likely(cpu < nr_cpu_ids)) 510 return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); 511 } else if (likely(cpu == WORK_CPU_UNBOUND)) 512 return wq->cpu_wq.single; 513 return NULL; 514 } 515 516 static unsigned int work_color_to_flags(int color) 517 { 518 return color << WORK_STRUCT_COLOR_SHIFT; 519 } 520 521 static int get_work_color(struct work_struct *work) 522 { 523 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & 524 ((1 << WORK_STRUCT_COLOR_BITS) - 1); 525 } 526 527 static int work_next_color(int color) 528 { 529 return (color + 1) % WORK_NR_COLORS; 530 } 531 532 /* 533 * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data 534 * contain the pointer to the queued cwq. Once execution starts, the flag 535 * is cleared and the high bits contain OFFQ flags and CPU number. 536 * 537 * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling() 538 * and clear_work_data() can be used to set the cwq, cpu or clear 539 * work->data. These functions should only be called while the work is 540 * owned - ie. while the PENDING bit is set. 541 * 542 * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to 543 * a work. gcwq is available once the work has been queued anywhere after 544 * initialization until it is sync canceled. cwq is available only while 545 * the work item is queued. 546 * 547 * %WORK_OFFQ_CANCELING is used to mark a work item which is being 548 * canceled. While being canceled, a work item may have its PENDING set 549 * but stay off timer and worklist for arbitrarily long and nobody should 550 * try to steal the PENDING bit. 551 */ 552 static inline void set_work_data(struct work_struct *work, unsigned long data, 553 unsigned long flags) 554 { 555 BUG_ON(!work_pending(work)); 556 atomic_long_set(&work->data, data | flags | work_static(work)); 557 } 558 559 static void set_work_cwq(struct work_struct *work, 560 struct cpu_workqueue_struct *cwq, 561 unsigned long extra_flags) 562 { 563 set_work_data(work, (unsigned long)cwq, 564 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); 565 } 566 567 static void set_work_cpu_and_clear_pending(struct work_struct *work, 568 unsigned int cpu) 569 { 570 /* 571 * The following wmb is paired with the implied mb in 572 * test_and_set_bit(PENDING) and ensures all updates to @work made 573 * here are visible to and precede any updates by the next PENDING 574 * owner. 
575 */ 576 smp_wmb(); 577 set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0); 578 } 579 580 static void clear_work_data(struct work_struct *work) 581 { 582 smp_wmb(); /* see set_work_cpu_and_clear_pending() */ 583 set_work_data(work, WORK_STRUCT_NO_CPU, 0); 584 } 585 586 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) 587 { 588 unsigned long data = atomic_long_read(&work->data); 589 590 if (data & WORK_STRUCT_CWQ) 591 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 592 else 593 return NULL; 594 } 595 596 static struct global_cwq *get_work_gcwq(struct work_struct *work) 597 { 598 unsigned long data = atomic_long_read(&work->data); 599 unsigned int cpu; 600 601 if (data & WORK_STRUCT_CWQ) 602 return ((struct cpu_workqueue_struct *) 603 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq; 604 605 cpu = data >> WORK_OFFQ_CPU_SHIFT; 606 if (cpu == WORK_CPU_NONE) 607 return NULL; 608 609 BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND); 610 return get_gcwq(cpu); 611 } 612 613 static void mark_work_canceling(struct work_struct *work) 614 { 615 struct global_cwq *gcwq = get_work_gcwq(work); 616 unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE; 617 618 set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING, 619 WORK_STRUCT_PENDING); 620 } 621 622 static bool work_is_canceling(struct work_struct *work) 623 { 624 unsigned long data = atomic_long_read(&work->data); 625 626 return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING); 627 } 628 629 /* 630 * Policy functions. These define the policies on how the global worker 631 * pools are managed. Unless noted otherwise, these functions assume that 632 * they're being called with gcwq->lock held. 633 */ 634 635 static bool __need_more_worker(struct worker_pool *pool) 636 { 637 return !atomic_read(get_pool_nr_running(pool)); 638 } 639 640 /* 641 * Need to wake up a worker? Called from anything but currently 642 * running workers. 643 * 644 * Note that, because unbound workers never contribute to nr_running, this 645 * function will always return %true for unbound gcwq as long as the 646 * worklist isn't empty. 647 */ 648 static bool need_more_worker(struct worker_pool *pool) 649 { 650 return !list_empty(&pool->worklist) && __need_more_worker(pool); 651 } 652 653 /* Can I start working? Called from busy but !running workers. */ 654 static bool may_start_working(struct worker_pool *pool) 655 { 656 return pool->nr_idle; 657 } 658 659 /* Do I need to keep working? Called from currently running workers. */ 660 static bool keep_working(struct worker_pool *pool) 661 { 662 atomic_t *nr_running = get_pool_nr_running(pool); 663 664 return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1; 665 } 666 667 /* Do we need a new worker? Called from manager. */ 668 static bool need_to_create_worker(struct worker_pool *pool) 669 { 670 return need_more_worker(pool) && !may_start_working(pool); 671 } 672 673 /* Do I need to be the manager? */ 674 static bool need_to_manage_workers(struct worker_pool *pool) 675 { 676 return need_to_create_worker(pool) || 677 (pool->flags & POOL_MANAGE_WORKERS); 678 } 679 680 /* Do we have too many workers and should some go away? */ 681 static bool too_many_workers(struct worker_pool *pool) 682 { 683 bool managing = pool->flags & POOL_MANAGING_WORKERS; 684 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 685 int nr_busy = pool->nr_workers - nr_idle; 686 687 /* 688 * nr_idle and idle_list may disagree if idle rebinding is in 689 * progress. 
Never return %true if idle_list is empty. 690 */ 691 if (list_empty(&pool->idle_list)) 692 return false; 693 694 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 695 } 696 697 /* 698 * Wake up functions. 699 */ 700 701 /* Return the first worker. Safe with preemption disabled */ 702 static struct worker *first_worker(struct worker_pool *pool) 703 { 704 if (unlikely(list_empty(&pool->idle_list))) 705 return NULL; 706 707 return list_first_entry(&pool->idle_list, struct worker, entry); 708 } 709 710 /** 711 * wake_up_worker - wake up an idle worker 712 * @pool: worker pool to wake worker from 713 * 714 * Wake up the first idle worker of @pool. 715 * 716 * CONTEXT: 717 * spin_lock_irq(gcwq->lock). 718 */ 719 static void wake_up_worker(struct worker_pool *pool) 720 { 721 struct worker *worker = first_worker(pool); 722 723 if (likely(worker)) 724 wake_up_process(worker->task); 725 } 726 727 /** 728 * wq_worker_waking_up - a worker is waking up 729 * @task: task waking up 730 * @cpu: CPU @task is waking up to 731 * 732 * This function is called during try_to_wake_up() when a worker is 733 * being awoken. 734 * 735 * CONTEXT: 736 * spin_lock_irq(rq->lock) 737 */ 738 void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) 739 { 740 struct worker *worker = kthread_data(task); 741 742 if (!(worker->flags & WORKER_NOT_RUNNING)) 743 atomic_inc(get_pool_nr_running(worker->pool)); 744 } 745 746 /** 747 * wq_worker_sleeping - a worker is going to sleep 748 * @task: task going to sleep 749 * @cpu: CPU in question, must be the current CPU number 750 * 751 * This function is called during schedule() when a busy worker is 752 * going to sleep. Worker on the same cpu can be woken up by 753 * returning pointer to its task. 754 * 755 * CONTEXT: 756 * spin_lock_irq(rq->lock) 757 * 758 * RETURNS: 759 * Worker task on @cpu to wake up, %NULL if none. 760 */ 761 struct task_struct *wq_worker_sleeping(struct task_struct *task, 762 unsigned int cpu) 763 { 764 struct worker *worker = kthread_data(task), *to_wakeup = NULL; 765 struct worker_pool *pool = worker->pool; 766 atomic_t *nr_running = get_pool_nr_running(pool); 767 768 if (worker->flags & WORKER_NOT_RUNNING) 769 return NULL; 770 771 /* this can only happen on the local cpu */ 772 BUG_ON(cpu != raw_smp_processor_id()); 773 774 /* 775 * The counterpart of the following dec_and_test, implied mb, 776 * worklist not empty test sequence is in insert_work(). 777 * Please read comment there. 778 * 779 * NOT_RUNNING is clear. This means that we're bound to and 780 * running on the local cpu w/ rq lock held and preemption 781 * disabled, which in turn means that none else could be 782 * manipulating idle_list, so dereferencing idle_list without gcwq 783 * lock is safe. 784 */ 785 if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist)) 786 to_wakeup = first_worker(pool); 787 return to_wakeup ? to_wakeup->task : NULL; 788 } 789 790 /** 791 * worker_set_flags - set worker flags and adjust nr_running accordingly 792 * @worker: self 793 * @flags: flags to set 794 * @wakeup: wakeup an idle worker if necessary 795 * 796 * Set @flags in @worker->flags and adjust nr_running accordingly. If 797 * nr_running becomes zero and @wakeup is %true, an idle worker is 798 * woken up. 
799 * 800 * CONTEXT: 801 * spin_lock_irq(gcwq->lock) 802 */ 803 static inline void worker_set_flags(struct worker *worker, unsigned int flags, 804 bool wakeup) 805 { 806 struct worker_pool *pool = worker->pool; 807 808 WARN_ON_ONCE(worker->task != current); 809 810 /* 811 * If transitioning into NOT_RUNNING, adjust nr_running and 812 * wake up an idle worker as necessary if requested by 813 * @wakeup. 814 */ 815 if ((flags & WORKER_NOT_RUNNING) && 816 !(worker->flags & WORKER_NOT_RUNNING)) { 817 atomic_t *nr_running = get_pool_nr_running(pool); 818 819 if (wakeup) { 820 if (atomic_dec_and_test(nr_running) && 821 !list_empty(&pool->worklist)) 822 wake_up_worker(pool); 823 } else 824 atomic_dec(nr_running); 825 } 826 827 worker->flags |= flags; 828 } 829 830 /** 831 * worker_clr_flags - clear worker flags and adjust nr_running accordingly 832 * @worker: self 833 * @flags: flags to clear 834 * 835 * Clear @flags in @worker->flags and adjust nr_running accordingly. 836 * 837 * CONTEXT: 838 * spin_lock_irq(gcwq->lock) 839 */ 840 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 841 { 842 struct worker_pool *pool = worker->pool; 843 unsigned int oflags = worker->flags; 844 845 WARN_ON_ONCE(worker->task != current); 846 847 worker->flags &= ~flags; 848 849 /* 850 * If transitioning out of NOT_RUNNING, increment nr_running. Note 851 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 852 * of multiple flags, not a single flag. 853 */ 854 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 855 if (!(worker->flags & WORKER_NOT_RUNNING)) 856 atomic_inc(get_pool_nr_running(pool)); 857 } 858 859 /** 860 * busy_worker_head - return the busy hash head for a work 861 * @gcwq: gcwq of interest 862 * @work: work to be hashed 863 * 864 * Return hash head of @gcwq for @work. 865 * 866 * CONTEXT: 867 * spin_lock_irq(gcwq->lock). 868 * 869 * RETURNS: 870 * Pointer to the hash head. 871 */ 872 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq, 873 struct work_struct *work) 874 { 875 const int base_shift = ilog2(sizeof(struct work_struct)); 876 unsigned long v = (unsigned long)work; 877 878 /* simple shift and fold hash, do we need something better? */ 879 v >>= base_shift; 880 v += v >> BUSY_WORKER_HASH_ORDER; 881 v &= BUSY_WORKER_HASH_MASK; 882 883 return &gcwq->busy_hash[v]; 884 } 885 886 /** 887 * __find_worker_executing_work - find worker which is executing a work 888 * @gcwq: gcwq of interest 889 * @bwh: hash head as returned by busy_worker_head() 890 * @work: work to find worker for 891 * 892 * Find a worker which is executing @work on @gcwq. @bwh should be 893 * the hash head obtained by calling busy_worker_head() with the same 894 * work. 895 * 896 * CONTEXT: 897 * spin_lock_irq(gcwq->lock). 898 * 899 * RETURNS: 900 * Pointer to worker which is executing @work if found, NULL 901 * otherwise. 902 */ 903 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq, 904 struct hlist_head *bwh, 905 struct work_struct *work) 906 { 907 struct worker *worker; 908 struct hlist_node *tmp; 909 910 hlist_for_each_entry(worker, tmp, bwh, hentry) 911 if (worker->current_work == work) 912 return worker; 913 return NULL; 914 } 915 916 /** 917 * find_worker_executing_work - find worker which is executing a work 918 * @gcwq: gcwq of interest 919 * @work: work to find worker for 920 * 921 * Find a worker which is executing @work on @gcwq. 
This function is 922 * identical to __find_worker_executing_work() except that this 923 * function calculates @bwh itself. 924 * 925 * CONTEXT: 926 * spin_lock_irq(gcwq->lock). 927 * 928 * RETURNS: 929 * Pointer to worker which is executing @work if found, NULL 930 * otherwise. 931 */ 932 static struct worker *find_worker_executing_work(struct global_cwq *gcwq, 933 struct work_struct *work) 934 { 935 return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work), 936 work); 937 } 938 939 /** 940 * move_linked_works - move linked works to a list 941 * @work: start of series of works to be scheduled 942 * @head: target list to append @work to 943 * @nextp: out paramter for nested worklist walking 944 * 945 * Schedule linked works starting from @work to @head. Work series to 946 * be scheduled starts at @work and includes any consecutive work with 947 * WORK_STRUCT_LINKED set in its predecessor. 948 * 949 * If @nextp is not NULL, it's updated to point to the next work of 950 * the last scheduled work. This allows move_linked_works() to be 951 * nested inside outer list_for_each_entry_safe(). 952 * 953 * CONTEXT: 954 * spin_lock_irq(gcwq->lock). 955 */ 956 static void move_linked_works(struct work_struct *work, struct list_head *head, 957 struct work_struct **nextp) 958 { 959 struct work_struct *n; 960 961 /* 962 * Linked worklist will always end before the end of the list, 963 * use NULL for list head. 964 */ 965 list_for_each_entry_safe_from(work, n, NULL, entry) { 966 list_move_tail(&work->entry, head); 967 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 968 break; 969 } 970 971 /* 972 * If we're already inside safe list traversal and have moved 973 * multiple works to the scheduled queue, the next position 974 * needs to be updated. 975 */ 976 if (nextp) 977 *nextp = n; 978 } 979 980 static void cwq_activate_delayed_work(struct work_struct *work) 981 { 982 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 983 984 trace_workqueue_activate_work(work); 985 move_linked_works(work, &cwq->pool->worklist, NULL); 986 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); 987 cwq->nr_active++; 988 } 989 990 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) 991 { 992 struct work_struct *work = list_first_entry(&cwq->delayed_works, 993 struct work_struct, entry); 994 995 cwq_activate_delayed_work(work); 996 } 997 998 /** 999 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight 1000 * @cwq: cwq of interest 1001 * @color: color of work which left the queue 1002 * 1003 * A work either has completed or is removed from pending queue, 1004 * decrement nr_in_flight of its cwq and handle workqueue flushing. 1005 * 1006 * CONTEXT: 1007 * spin_lock_irq(gcwq->lock). 1008 */ 1009 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) 1010 { 1011 /* ignore uncolored works */ 1012 if (color == WORK_NO_COLOR) 1013 return; 1014 1015 cwq->nr_in_flight[color]--; 1016 1017 cwq->nr_active--; 1018 if (!list_empty(&cwq->delayed_works)) { 1019 /* one down, submit a delayed one */ 1020 if (cwq->nr_active < cwq->max_active) 1021 cwq_activate_first_delayed(cwq); 1022 } 1023 1024 /* is flush in progress and are we at the flushing tip? */ 1025 if (likely(cwq->flush_color != color)) 1026 return; 1027 1028 /* are there still in-flight works? */ 1029 if (cwq->nr_in_flight[color]) 1030 return; 1031 1032 /* this cwq is done, clear flush_color */ 1033 cwq->flush_color = -1; 1034 1035 /* 1036 * If this was the last cwq, wake up the first flusher. 
It 1037 * will handle the rest. 1038 */ 1039 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) 1040 complete(&cwq->wq->first_flusher->done); 1041 } 1042 1043 /** 1044 * try_to_grab_pending - steal work item from worklist and disable irq 1045 * @work: work item to steal 1046 * @is_dwork: @work is a delayed_work 1047 * @flags: place to store irq state 1048 * 1049 * Try to grab PENDING bit of @work. This function can handle @work in any 1050 * stable state - idle, on timer or on worklist. Return values are 1051 * 1052 * 1 if @work was pending and we successfully stole PENDING 1053 * 0 if @work was idle and we claimed PENDING 1054 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1055 * -ENOENT if someone else is canceling @work, this state may persist 1056 * for arbitrarily long 1057 * 1058 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1059 * interrupted while holding PENDING and @work off queue, irq must be 1060 * disabled on entry. This, combined with delayed_work->timer being 1061 * irqsafe, ensures that we return -EAGAIN for finite short period of time. 1062 * 1063 * On successful return, >= 0, irq is disabled and the caller is 1064 * responsible for releasing it using local_irq_restore(*@flags). 1065 * 1066 * This function is safe to call from any context including IRQ handler. 1067 */ 1068 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1069 unsigned long *flags) 1070 { 1071 struct global_cwq *gcwq; 1072 1073 local_irq_save(*flags); 1074 1075 /* try to steal the timer if it exists */ 1076 if (is_dwork) { 1077 struct delayed_work *dwork = to_delayed_work(work); 1078 1079 /* 1080 * dwork->timer is irqsafe. If del_timer() fails, it's 1081 * guaranteed that the timer is not queued anywhere and not 1082 * running on the local CPU. 1083 */ 1084 if (likely(del_timer(&dwork->timer))) 1085 return 1; 1086 } 1087 1088 /* try to claim PENDING the normal way */ 1089 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 1090 return 0; 1091 1092 /* 1093 * The queueing is in progress, or it is already queued. Try to 1094 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 1095 */ 1096 gcwq = get_work_gcwq(work); 1097 if (!gcwq) 1098 goto fail; 1099 1100 spin_lock(&gcwq->lock); 1101 if (!list_empty(&work->entry)) { 1102 /* 1103 * This work is queued, but perhaps we locked the wrong gcwq. 1104 * In that case we must see the new value after rmb(), see 1105 * insert_work()->wmb(). 1106 */ 1107 smp_rmb(); 1108 if (gcwq == get_work_gcwq(work)) { 1109 debug_work_deactivate(work); 1110 1111 /* 1112 * A delayed work item cannot be grabbed directly 1113 * because it might have linked NO_COLOR work items 1114 * which, if left on the delayed_list, will confuse 1115 * cwq->nr_active management later on and cause 1116 * stall. Make sure the work item is activated 1117 * before grabbing. 
1118 */ 1119 if (*work_data_bits(work) & WORK_STRUCT_DELAYED) 1120 cwq_activate_delayed_work(work); 1121 1122 list_del_init(&work->entry); 1123 cwq_dec_nr_in_flight(get_work_cwq(work), 1124 get_work_color(work)); 1125 1126 spin_unlock(&gcwq->lock); 1127 return 1; 1128 } 1129 } 1130 spin_unlock(&gcwq->lock); 1131 fail: 1132 local_irq_restore(*flags); 1133 if (work_is_canceling(work)) 1134 return -ENOENT; 1135 cpu_relax(); 1136 return -EAGAIN; 1137 } 1138 1139 /** 1140 * insert_work - insert a work into gcwq 1141 * @cwq: cwq @work belongs to 1142 * @work: work to insert 1143 * @head: insertion point 1144 * @extra_flags: extra WORK_STRUCT_* flags to set 1145 * 1146 * Insert @work which belongs to @cwq into @gcwq after @head. 1147 * @extra_flags is or'd to work_struct flags. 1148 * 1149 * CONTEXT: 1150 * spin_lock_irq(gcwq->lock). 1151 */ 1152 static void insert_work(struct cpu_workqueue_struct *cwq, 1153 struct work_struct *work, struct list_head *head, 1154 unsigned int extra_flags) 1155 { 1156 struct worker_pool *pool = cwq->pool; 1157 1158 /* we own @work, set data and link */ 1159 set_work_cwq(work, cwq, extra_flags); 1160 1161 /* 1162 * Ensure that we get the right work->data if we see the 1163 * result of list_add() below, see try_to_grab_pending(). 1164 */ 1165 smp_wmb(); 1166 1167 list_add_tail(&work->entry, head); 1168 1169 /* 1170 * Ensure either worker_sched_deactivated() sees the above 1171 * list_add_tail() or we see zero nr_running to avoid workers 1172 * lying around lazily while there are works to be processed. 1173 */ 1174 smp_mb(); 1175 1176 if (__need_more_worker(pool)) 1177 wake_up_worker(pool); 1178 } 1179 1180 /* 1181 * Test whether @work is being queued from another work executing on the 1182 * same workqueue. This is rather expensive and should only be used from 1183 * cold paths. 1184 */ 1185 static bool is_chained_work(struct workqueue_struct *wq) 1186 { 1187 unsigned long flags; 1188 unsigned int cpu; 1189 1190 for_each_gcwq_cpu(cpu) { 1191 struct global_cwq *gcwq = get_gcwq(cpu); 1192 struct worker *worker; 1193 struct hlist_node *pos; 1194 int i; 1195 1196 spin_lock_irqsave(&gcwq->lock, flags); 1197 for_each_busy_worker(worker, i, pos, gcwq) { 1198 if (worker->task != current) 1199 continue; 1200 spin_unlock_irqrestore(&gcwq->lock, flags); 1201 /* 1202 * I'm @worker, no locking necessary. See if @work 1203 * is headed to the same workqueue. 1204 */ 1205 return worker->current_cwq->wq == wq; 1206 } 1207 spin_unlock_irqrestore(&gcwq->lock, flags); 1208 } 1209 return false; 1210 } 1211 1212 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, 1213 struct work_struct *work) 1214 { 1215 struct global_cwq *gcwq; 1216 struct cpu_workqueue_struct *cwq; 1217 struct list_head *worklist; 1218 unsigned int work_flags; 1219 unsigned int req_cpu = cpu; 1220 1221 /* 1222 * While a work item is PENDING && off queue, a task trying to 1223 * steal the PENDING will busy-loop waiting for it to either get 1224 * queued or lose PENDING. Grabbing PENDING and queueing should 1225 * happen with IRQ disabled. 
1226 */ 1227 WARN_ON_ONCE(!irqs_disabled()); 1228 1229 debug_work_activate(work); 1230 1231 /* if dying, only works from the same workqueue are allowed */ 1232 if (unlikely(wq->flags & WQ_DRAINING) && 1233 WARN_ON_ONCE(!is_chained_work(wq))) 1234 return; 1235 1236 /* determine gcwq to use */ 1237 if (!(wq->flags & WQ_UNBOUND)) { 1238 struct global_cwq *last_gcwq; 1239 1240 if (cpu == WORK_CPU_UNBOUND) 1241 cpu = raw_smp_processor_id(); 1242 1243 /* 1244 * It's multi cpu. If @work was previously on a different 1245 * cpu, it might still be running there, in which case the 1246 * work needs to be queued on that cpu to guarantee 1247 * non-reentrancy. 1248 */ 1249 gcwq = get_gcwq(cpu); 1250 last_gcwq = get_work_gcwq(work); 1251 1252 if (last_gcwq && last_gcwq != gcwq) { 1253 struct worker *worker; 1254 1255 spin_lock(&last_gcwq->lock); 1256 1257 worker = find_worker_executing_work(last_gcwq, work); 1258 1259 if (worker && worker->current_cwq->wq == wq) 1260 gcwq = last_gcwq; 1261 else { 1262 /* meh... not running there, queue here */ 1263 spin_unlock(&last_gcwq->lock); 1264 spin_lock(&gcwq->lock); 1265 } 1266 } else { 1267 spin_lock(&gcwq->lock); 1268 } 1269 } else { 1270 gcwq = get_gcwq(WORK_CPU_UNBOUND); 1271 spin_lock(&gcwq->lock); 1272 } 1273 1274 /* gcwq determined, get cwq and queue */ 1275 cwq = get_cwq(gcwq->cpu, wq); 1276 trace_workqueue_queue_work(req_cpu, cwq, work); 1277 1278 if (WARN_ON(!list_empty(&work->entry))) { 1279 spin_unlock(&gcwq->lock); 1280 return; 1281 } 1282 1283 cwq->nr_in_flight[cwq->work_color]++; 1284 work_flags = work_color_to_flags(cwq->work_color); 1285 1286 if (likely(cwq->nr_active < cwq->max_active)) { 1287 trace_workqueue_activate_work(work); 1288 cwq->nr_active++; 1289 worklist = &cwq->pool->worklist; 1290 } else { 1291 work_flags |= WORK_STRUCT_DELAYED; 1292 worklist = &cwq->delayed_works; 1293 } 1294 1295 insert_work(cwq, work, worklist, work_flags); 1296 1297 spin_unlock(&gcwq->lock); 1298 } 1299 1300 /** 1301 * queue_work_on - queue work on specific cpu 1302 * @cpu: CPU number to execute work on 1303 * @wq: workqueue to use 1304 * @work: work to queue 1305 * 1306 * Returns %false if @work was already on a queue, %true otherwise. 1307 * 1308 * We queue the work to a specific CPU, the caller must ensure it 1309 * can't go away. 1310 */ 1311 bool queue_work_on(int cpu, struct workqueue_struct *wq, 1312 struct work_struct *work) 1313 { 1314 bool ret = false; 1315 unsigned long flags; 1316 1317 local_irq_save(flags); 1318 1319 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1320 __queue_work(cpu, wq, work); 1321 ret = true; 1322 } 1323 1324 local_irq_restore(flags); 1325 return ret; 1326 } 1327 EXPORT_SYMBOL_GPL(queue_work_on); 1328 1329 /** 1330 * queue_work - queue work on a workqueue 1331 * @wq: workqueue to use 1332 * @work: work to queue 1333 * 1334 * Returns %false if @work was already on a queue, %true otherwise. 1335 * 1336 * We queue the work to the CPU on which it was submitted, but if the CPU dies 1337 * it can be processed by another CPU. 
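 *
 * A minimal caller-side sketch (my_work_fn and my_work are illustrative
 * names; DECLARE_WORK(), system_wq and queue_work() are the real APIs):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(system_wq, &my_work);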
1338 */ 1339 bool queue_work(struct workqueue_struct *wq, struct work_struct *work) 1340 { 1341 return queue_work_on(WORK_CPU_UNBOUND, wq, work); 1342 } 1343 EXPORT_SYMBOL_GPL(queue_work); 1344 1345 void delayed_work_timer_fn(unsigned long __data) 1346 { 1347 struct delayed_work *dwork = (struct delayed_work *)__data; 1348 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); 1349 1350 /* should have been called from irqsafe timer with irq already off */ 1351 __queue_work(dwork->cpu, cwq->wq, &dwork->work); 1352 } 1353 EXPORT_SYMBOL_GPL(delayed_work_timer_fn); 1354 1355 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 1356 struct delayed_work *dwork, unsigned long delay) 1357 { 1358 struct timer_list *timer = &dwork->timer; 1359 struct work_struct *work = &dwork->work; 1360 unsigned int lcpu; 1361 1362 WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 1363 timer->data != (unsigned long)dwork); 1364 BUG_ON(timer_pending(timer)); 1365 BUG_ON(!list_empty(&work->entry)); 1366 1367 timer_stats_timer_set_start_info(&dwork->timer); 1368 1369 /* 1370 * This stores cwq for the moment, for the timer_fn. Note that the 1371 * work's gcwq is preserved to allow reentrance detection for 1372 * delayed works. 1373 */ 1374 if (!(wq->flags & WQ_UNBOUND)) { 1375 struct global_cwq *gcwq = get_work_gcwq(work); 1376 1377 /* 1378 * If we cannot get the last gcwq from @work directly, 1379 * select the last CPU such that it avoids unnecessarily 1380 * triggering non-reentrancy check in __queue_work(). 1381 */ 1382 lcpu = cpu; 1383 if (gcwq) 1384 lcpu = gcwq->cpu; 1385 if (lcpu == WORK_CPU_UNBOUND) 1386 lcpu = raw_smp_processor_id(); 1387 } else { 1388 lcpu = WORK_CPU_UNBOUND; 1389 } 1390 1391 set_work_cwq(work, get_cwq(lcpu, wq), 0); 1392 1393 dwork->cpu = cpu; 1394 timer->expires = jiffies + delay; 1395 1396 if (unlikely(cpu != WORK_CPU_UNBOUND)) 1397 add_timer_on(timer, cpu); 1398 else 1399 add_timer(timer); 1400 } 1401 1402 /** 1403 * queue_delayed_work_on - queue work on specific CPU after delay 1404 * @cpu: CPU number to execute work on 1405 * @wq: workqueue to use 1406 * @dwork: work to queue 1407 * @delay: number of jiffies to wait before queueing 1408 * 1409 * Returns %false if @work was already on a queue, %true otherwise. If 1410 * @delay is zero and @dwork is idle, it will be scheduled for immediate 1411 * execution. 1412 */ 1413 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 1414 struct delayed_work *dwork, unsigned long delay) 1415 { 1416 struct work_struct *work = &dwork->work; 1417 bool ret = false; 1418 unsigned long flags; 1419 1420 if (!delay) 1421 return queue_work_on(cpu, wq, &dwork->work); 1422 1423 /* read the comment in __queue_work() */ 1424 local_irq_save(flags); 1425 1426 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1427 __queue_delayed_work(cpu, wq, dwork, delay); 1428 ret = true; 1429 } 1430 1431 local_irq_restore(flags); 1432 return ret; 1433 } 1434 EXPORT_SYMBOL_GPL(queue_delayed_work_on); 1435 1436 /** 1437 * queue_delayed_work - queue work on a workqueue after delay 1438 * @wq: workqueue to use 1439 * @dwork: delayable work to queue 1440 * @delay: number of jiffies to wait before queueing 1441 * 1442 * Equivalent to queue_delayed_work_on() but tries to use the local CPU. 
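 *
 * A small usage sketch (my_dwork_fn and my_dwork are illustrative names;
 * DECLARE_DELAYED_WORK(), queue_delayed_work() and mod_delayed_work()
 * are the real APIs):
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		pr_debug("delayed work ran\n");
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(system_wq, &my_dwork, HZ);
 *	mod_delayed_work(system_wq, &my_dwork, 2 * HZ);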
1443 */ 1444 bool queue_delayed_work(struct workqueue_struct *wq, 1445 struct delayed_work *dwork, unsigned long delay) 1446 { 1447 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 1448 } 1449 EXPORT_SYMBOL_GPL(queue_delayed_work); 1450 1451 /** 1452 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 1453 * @cpu: CPU number to execute work on 1454 * @wq: workqueue to use 1455 * @dwork: work to queue 1456 * @delay: number of jiffies to wait before queueing 1457 * 1458 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 1459 * modify @dwork's timer so that it expires after @delay. If @delay is 1460 * zero, @work is guaranteed to be scheduled immediately regardless of its 1461 * current state. 1462 * 1463 * Returns %false if @dwork was idle and queued, %true if @dwork was 1464 * pending and its timer was modified. 1465 * 1466 * This function is safe to call from any context including IRQ handler. 1467 * See try_to_grab_pending() for details. 1468 */ 1469 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 1470 struct delayed_work *dwork, unsigned long delay) 1471 { 1472 unsigned long flags; 1473 int ret; 1474 1475 do { 1476 ret = try_to_grab_pending(&dwork->work, true, &flags); 1477 } while (unlikely(ret == -EAGAIN)); 1478 1479 if (likely(ret >= 0)) { 1480 __queue_delayed_work(cpu, wq, dwork, delay); 1481 local_irq_restore(flags); 1482 } 1483 1484 /* -ENOENT from try_to_grab_pending() becomes %true */ 1485 return ret; 1486 } 1487 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 1488 1489 /** 1490 * mod_delayed_work - modify delay of or queue a delayed work 1491 * @wq: workqueue to use 1492 * @dwork: work to queue 1493 * @delay: number of jiffies to wait before queueing 1494 * 1495 * mod_delayed_work_on() on local CPU. 1496 */ 1497 bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, 1498 unsigned long delay) 1499 { 1500 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 1501 } 1502 EXPORT_SYMBOL_GPL(mod_delayed_work); 1503 1504 /** 1505 * worker_enter_idle - enter idle state 1506 * @worker: worker which is entering idle state 1507 * 1508 * @worker is entering idle state. Update stats and idle timer if 1509 * necessary. 1510 * 1511 * LOCKING: 1512 * spin_lock_irq(gcwq->lock). 1513 */ 1514 static void worker_enter_idle(struct worker *worker) 1515 { 1516 struct worker_pool *pool = worker->pool; 1517 struct global_cwq *gcwq = pool->gcwq; 1518 1519 BUG_ON(worker->flags & WORKER_IDLE); 1520 BUG_ON(!list_empty(&worker->entry) && 1521 (worker->hentry.next || worker->hentry.pprev)); 1522 1523 /* can't use worker_set_flags(), also called from start_worker() */ 1524 worker->flags |= WORKER_IDLE; 1525 pool->nr_idle++; 1526 worker->last_active = jiffies; 1527 1528 /* idle_list is LIFO */ 1529 list_add(&worker->entry, &pool->idle_list); 1530 1531 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 1532 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 1533 1534 /* 1535 * Sanity check nr_running. Because gcwq_unbind_fn() releases 1536 * gcwq->lock between setting %WORKER_UNBOUND and zapping 1537 * nr_running, the warning may trigger spuriously. Check iff 1538 * unbind is not in progress. 
	 */
	WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
		     pool->nr_workers == pool->nr_idle &&
		     atomic_read(get_pool_nr_running(pool)));
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
 * @worker: self
 *
 * Works which are scheduled while the cpu is online must at least be
 * scheduled to a worker which is bound to the cpu so that if they are
 * flushed from cpu callbacks while cpu is going down, they are
 * guaranteed to execute on the cpu.
 *
 * This function is to be used by rogue workers and rescuers to bind
 * themselves to the target cpu and may race with cpu going down or
 * coming online.  kthread_bind() can't be used because it may put the
 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
 * verbatim as it's best effort and blocking and gcwq may be
 * [dis]associated in the meantime.
 *
 * This function tries set_cpus_allowed() and locks gcwq and verifies the
 * binding against %GCWQ_DISASSOCIATED which is set during
 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
 * enters idle state or fetches works without dropping lock, it can
 * guarantee the scheduling requirement described in the first paragraph.
 *
 * CONTEXT:
 * Might sleep.  Called without any lock but returns with gcwq->lock
 * held.
 *
 * RETURNS:
 * %true if the associated gcwq is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = worker->pool->gcwq;
	struct task_struct *task = worker->task;

	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation.  Verify
		 * against GCWQ_DISASSOCIATED.
		 */
		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);
		if (gcwq->flags & GCWQ_DISASSOCIATED)
			return false;
		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))
			return true;
		spin_unlock_irq(&gcwq->lock);

		/*
		 * We've raced with CPU hot[un]plug.  Give it a breather
		 * and retry migration.  cond_resched() is required here;
		 * otherwise, we might deadlock against cpu_stop trying to
		 * bring down the CPU on non-preemptive kernel.
		 */
		cpu_relax();
		cond_resched();
	}
}

/*
 * Rebind an idle @worker to its CPU.  worker_thread() will test
 * list_empty(@worker->entry) before leaving idle and call this function.
1633 */ 1634 static void idle_worker_rebind(struct worker *worker) 1635 { 1636 struct global_cwq *gcwq = worker->pool->gcwq; 1637 1638 /* CPU may go down again inbetween, clear UNBOUND only on success */ 1639 if (worker_maybe_bind_and_lock(worker)) 1640 worker_clr_flags(worker, WORKER_UNBOUND); 1641 1642 /* rebind complete, become available again */ 1643 list_add(&worker->entry, &worker->pool->idle_list); 1644 spin_unlock_irq(&gcwq->lock); 1645 } 1646 1647 /* 1648 * Function for @worker->rebind.work used to rebind unbound busy workers to 1649 * the associated cpu which is coming back online. This is scheduled by 1650 * cpu up but can race with other cpu hotplug operations and may be 1651 * executed twice without intervening cpu down. 1652 */ 1653 static void busy_worker_rebind_fn(struct work_struct *work) 1654 { 1655 struct worker *worker = container_of(work, struct worker, rebind_work); 1656 struct global_cwq *gcwq = worker->pool->gcwq; 1657 1658 if (worker_maybe_bind_and_lock(worker)) 1659 worker_clr_flags(worker, WORKER_UNBOUND); 1660 1661 spin_unlock_irq(&gcwq->lock); 1662 } 1663 1664 /** 1665 * rebind_workers - rebind all workers of a gcwq to the associated CPU 1666 * @gcwq: gcwq of interest 1667 * 1668 * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding 1669 * is different for idle and busy ones. 1670 * 1671 * Idle ones will be removed from the idle_list and woken up. They will 1672 * add themselves back after completing rebind. This ensures that the 1673 * idle_list doesn't contain any unbound workers when re-bound busy workers 1674 * try to perform local wake-ups for concurrency management. 1675 * 1676 * Busy workers can rebind after they finish their current work items. 1677 * Queueing the rebind work item at the head of the scheduled list is 1678 * enough. Note that nr_running will be properly bumped as busy workers 1679 * rebind. 1680 * 1681 * On return, all non-manager workers are scheduled for rebind - see 1682 * manage_workers() for the manager special case. Any idle worker 1683 * including the manager will not appear on @idle_list until rebind is 1684 * complete, making local wake-ups safe. 1685 */ 1686 static void rebind_workers(struct global_cwq *gcwq) 1687 { 1688 struct worker_pool *pool; 1689 struct worker *worker, *n; 1690 struct hlist_node *pos; 1691 int i; 1692 1693 lockdep_assert_held(&gcwq->lock); 1694 1695 for_each_worker_pool(pool, gcwq) 1696 lockdep_assert_held(&pool->assoc_mutex); 1697 1698 /* dequeue and kick idle ones */ 1699 for_each_worker_pool(pool, gcwq) { 1700 list_for_each_entry_safe(worker, n, &pool->idle_list, entry) { 1701 /* 1702 * idle workers should be off @pool->idle_list 1703 * until rebind is complete to avoid receiving 1704 * premature local wake-ups. 1705 */ 1706 list_del_init(&worker->entry); 1707 1708 /* 1709 * worker_thread() will see the above dequeuing 1710 * and call idle_worker_rebind(). 1711 */ 1712 wake_up_process(worker->task); 1713 } 1714 } 1715 1716 /* rebind busy workers */ 1717 for_each_busy_worker(worker, i, pos, gcwq) { 1718 struct work_struct *rebind_work = &worker->rebind_work; 1719 struct workqueue_struct *wq; 1720 1721 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, 1722 work_data_bits(rebind_work))) 1723 continue; 1724 1725 debug_work_activate(rebind_work); 1726 1727 /* 1728 * wq doesn't really matter but let's keep @worker->pool 1729 * and @cwq->pool consistent for sanity. 
1730 */ 1731 if (worker_pool_pri(worker->pool)) 1732 wq = system_highpri_wq; 1733 else 1734 wq = system_wq; 1735 1736 insert_work(get_cwq(gcwq->cpu, wq), rebind_work, 1737 worker->scheduled.next, 1738 work_color_to_flags(WORK_NO_COLOR)); 1739 } 1740 } 1741 1742 static struct worker *alloc_worker(void) 1743 { 1744 struct worker *worker; 1745 1746 worker = kzalloc(sizeof(*worker), GFP_KERNEL); 1747 if (worker) { 1748 INIT_LIST_HEAD(&worker->entry); 1749 INIT_LIST_HEAD(&worker->scheduled); 1750 INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn); 1751 /* on creation a worker is in !idle && prep state */ 1752 worker->flags = WORKER_PREP; 1753 } 1754 return worker; 1755 } 1756 1757 /** 1758 * create_worker - create a new workqueue worker 1759 * @pool: pool the new worker will belong to 1760 * 1761 * Create a new worker which is bound to @pool. The returned worker 1762 * can be started by calling start_worker() or destroyed using 1763 * destroy_worker(). 1764 * 1765 * CONTEXT: 1766 * Might sleep. Does GFP_KERNEL allocations. 1767 * 1768 * RETURNS: 1769 * Pointer to the newly created worker. 1770 */ 1771 static struct worker *create_worker(struct worker_pool *pool) 1772 { 1773 struct global_cwq *gcwq = pool->gcwq; 1774 const char *pri = worker_pool_pri(pool) ? "H" : ""; 1775 struct worker *worker = NULL; 1776 int id = -1; 1777 1778 spin_lock_irq(&gcwq->lock); 1779 while (ida_get_new(&pool->worker_ida, &id)) { 1780 spin_unlock_irq(&gcwq->lock); 1781 if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL)) 1782 goto fail; 1783 spin_lock_irq(&gcwq->lock); 1784 } 1785 spin_unlock_irq(&gcwq->lock); 1786 1787 worker = alloc_worker(); 1788 if (!worker) 1789 goto fail; 1790 1791 worker->pool = pool; 1792 worker->id = id; 1793 1794 if (gcwq->cpu != WORK_CPU_UNBOUND) 1795 worker->task = kthread_create_on_node(worker_thread, 1796 worker, cpu_to_node(gcwq->cpu), 1797 "kworker/%u:%d%s", gcwq->cpu, id, pri); 1798 else 1799 worker->task = kthread_create(worker_thread, worker, 1800 "kworker/u:%d%s", id, pri); 1801 if (IS_ERR(worker->task)) 1802 goto fail; 1803 1804 if (worker_pool_pri(pool)) 1805 set_user_nice(worker->task, HIGHPRI_NICE_LEVEL); 1806 1807 /* 1808 * Determine CPU binding of the new worker depending on 1809 * %GCWQ_DISASSOCIATED. The caller is responsible for ensuring the 1810 * flag remains stable across this function. See the comments 1811 * above the flag definition for details. 1812 * 1813 * As an unbound worker may later become a regular one if CPU comes 1814 * online, make sure every worker has %PF_THREAD_BOUND set. 1815 */ 1816 if (!(gcwq->flags & GCWQ_DISASSOCIATED)) { 1817 kthread_bind(worker->task, gcwq->cpu); 1818 } else { 1819 worker->task->flags |= PF_THREAD_BOUND; 1820 worker->flags |= WORKER_UNBOUND; 1821 } 1822 1823 return worker; 1824 fail: 1825 if (id >= 0) { 1826 spin_lock_irq(&gcwq->lock); 1827 ida_remove(&pool->worker_ida, id); 1828 spin_unlock_irq(&gcwq->lock); 1829 } 1830 kfree(worker); 1831 return NULL; 1832 } 1833 1834 /** 1835 * start_worker - start a newly created worker 1836 * @worker: worker to start 1837 * 1838 * Make the gcwq aware of @worker and start it. 1839 * 1840 * CONTEXT: 1841 * spin_lock_irq(gcwq->lock). 
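 *
 * The usual pairing in this file, shown as a simplified sketch of what
 * maybe_create_worker() below does (error handling and the mayday timer
 * omitted):
 *
 *	worker = create_worker(pool);
 *	if (worker) {
 *		spin_lock_irq(&gcwq->lock);
 *		start_worker(worker);
 *		spin_unlock_irq(&gcwq->lock);
 *	}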
1842 */ 1843 static void start_worker(struct worker *worker) 1844 { 1845 worker->flags |= WORKER_STARTED; 1846 worker->pool->nr_workers++; 1847 worker_enter_idle(worker); 1848 wake_up_process(worker->task); 1849 } 1850 1851 /** 1852 * destroy_worker - destroy a workqueue worker 1853 * @worker: worker to be destroyed 1854 * 1855 * Destroy @worker and adjust @gcwq stats accordingly. 1856 * 1857 * CONTEXT: 1858 * spin_lock_irq(gcwq->lock) which is released and regrabbed. 1859 */ 1860 static void destroy_worker(struct worker *worker) 1861 { 1862 struct worker_pool *pool = worker->pool; 1863 struct global_cwq *gcwq = pool->gcwq; 1864 int id = worker->id; 1865 1866 /* sanity check frenzy */ 1867 BUG_ON(worker->current_work); 1868 BUG_ON(!list_empty(&worker->scheduled)); 1869 1870 if (worker->flags & WORKER_STARTED) 1871 pool->nr_workers--; 1872 if (worker->flags & WORKER_IDLE) 1873 pool->nr_idle--; 1874 1875 list_del_init(&worker->entry); 1876 worker->flags |= WORKER_DIE; 1877 1878 spin_unlock_irq(&gcwq->lock); 1879 1880 kthread_stop(worker->task); 1881 kfree(worker); 1882 1883 spin_lock_irq(&gcwq->lock); 1884 ida_remove(&pool->worker_ida, id); 1885 } 1886 1887 static void idle_worker_timeout(unsigned long __pool) 1888 { 1889 struct worker_pool *pool = (void *)__pool; 1890 struct global_cwq *gcwq = pool->gcwq; 1891 1892 spin_lock_irq(&gcwq->lock); 1893 1894 if (too_many_workers(pool)) { 1895 struct worker *worker; 1896 unsigned long expires; 1897 1898 /* idle_list is kept in LIFO order, check the last one */ 1899 worker = list_entry(pool->idle_list.prev, struct worker, entry); 1900 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 1901 1902 if (time_before(jiffies, expires)) 1903 mod_timer(&pool->idle_timer, expires); 1904 else { 1905 /* it's been idle for too long, wake up manager */ 1906 pool->flags |= POOL_MANAGE_WORKERS; 1907 wake_up_worker(pool); 1908 } 1909 } 1910 1911 spin_unlock_irq(&gcwq->lock); 1912 } 1913 1914 static bool send_mayday(struct work_struct *work) 1915 { 1916 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 1917 struct workqueue_struct *wq = cwq->wq; 1918 unsigned int cpu; 1919 1920 if (!(wq->flags & WQ_RESCUER)) 1921 return false; 1922 1923 /* mayday mayday mayday */ 1924 cpu = cwq->pool->gcwq->cpu; 1925 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ 1926 if (cpu == WORK_CPU_UNBOUND) 1927 cpu = 0; 1928 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask)) 1929 wake_up_process(wq->rescuer->task); 1930 return true; 1931 } 1932 1933 static void gcwq_mayday_timeout(unsigned long __pool) 1934 { 1935 struct worker_pool *pool = (void *)__pool; 1936 struct global_cwq *gcwq = pool->gcwq; 1937 struct work_struct *work; 1938 1939 spin_lock_irq(&gcwq->lock); 1940 1941 if (need_to_create_worker(pool)) { 1942 /* 1943 * We've been trying to create a new worker but 1944 * haven't been successful. We might be hitting an 1945 * allocation deadlock. Send distress signals to 1946 * rescuers. 1947 */ 1948 list_for_each_entry(work, &pool->worklist, entry) 1949 send_mayday(work); 1950 } 1951 1952 spin_unlock_irq(&gcwq->lock); 1953 1954 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 1955 } 1956 1957 /** 1958 * maybe_create_worker - create a new worker if necessary 1959 * @pool: pool to create a new worker for 1960 * 1961 * Create a new worker for @pool if necessary. @pool is guaranteed to 1962 * have at least one idle worker on return from this function. 
If 1963 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 1964 * sent to all rescuers with works scheduled on @pool to resolve 1965 * possible allocation deadlock. 1966 * 1967 * On return, need_to_create_worker() is guaranteed to be false and 1968 * may_start_working() true. 1969 * 1970 * LOCKING: 1971 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 1972 * multiple times. Does GFP_KERNEL allocations. Called only from 1973 * manager. 1974 * 1975 * RETURNS: 1976 * false if no action was taken and gcwq->lock stayed locked, true 1977 * otherwise. 1978 */ 1979 static bool maybe_create_worker(struct worker_pool *pool) 1980 __releases(&gcwq->lock) 1981 __acquires(&gcwq->lock) 1982 { 1983 struct global_cwq *gcwq = pool->gcwq; 1984 1985 if (!need_to_create_worker(pool)) 1986 return false; 1987 restart: 1988 spin_unlock_irq(&gcwq->lock); 1989 1990 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 1991 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 1992 1993 while (true) { 1994 struct worker *worker; 1995 1996 worker = create_worker(pool); 1997 if (worker) { 1998 del_timer_sync(&pool->mayday_timer); 1999 spin_lock_irq(&gcwq->lock); 2000 start_worker(worker); 2001 BUG_ON(need_to_create_worker(pool)); 2002 return true; 2003 } 2004 2005 if (!need_to_create_worker(pool)) 2006 break; 2007 2008 __set_current_state(TASK_INTERRUPTIBLE); 2009 schedule_timeout(CREATE_COOLDOWN); 2010 2011 if (!need_to_create_worker(pool)) 2012 break; 2013 } 2014 2015 del_timer_sync(&pool->mayday_timer); 2016 spin_lock_irq(&gcwq->lock); 2017 if (need_to_create_worker(pool)) 2018 goto restart; 2019 return true; 2020 } 2021 2022 /** 2023 * maybe_destroy_worker - destroy workers which have been idle for a while 2024 * @pool: pool to destroy workers for 2025 * 2026 * Destroy @pool workers which have been idle for longer than 2027 * IDLE_WORKER_TIMEOUT. 2028 * 2029 * LOCKING: 2030 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 2031 * multiple times. Called only from manager. 2032 * 2033 * RETURNS: 2034 * false if no action was taken and gcwq->lock stayed locked, true 2035 * otherwise. 2036 */ 2037 static bool maybe_destroy_workers(struct worker_pool *pool) 2038 { 2039 bool ret = false; 2040 2041 while (too_many_workers(pool)) { 2042 struct worker *worker; 2043 unsigned long expires; 2044 2045 worker = list_entry(pool->idle_list.prev, struct worker, entry); 2046 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2047 2048 if (time_before(jiffies, expires)) { 2049 mod_timer(&pool->idle_timer, expires); 2050 break; 2051 } 2052 2053 destroy_worker(worker); 2054 ret = true; 2055 } 2056 2057 return ret; 2058 } 2059 2060 /** 2061 * manage_workers - manage worker pool 2062 * @worker: self 2063 * 2064 * Assume the manager role and manage gcwq worker pool @worker belongs 2065 * to. At any given time, there can be only zero or one manager per 2066 * gcwq. The exclusion is handled automatically by this function. 2067 * 2068 * The caller can safely start processing works on false return. On 2069 * true return, it's guaranteed that need_to_create_worker() is false 2070 * and may_start_working() is true. 2071 * 2072 * CONTEXT: 2073 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 2074 * multiple times. Does GFP_KERNEL allocations. 2075 * 2076 * RETURNS: 2077 * false if no action was taken and gcwq->lock stayed locked, true if 2078 * some action was taken. 
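 *
 * Illustrative sketch (not part of the original kerneldoc): worker_thread()
 * below is the typical caller and simply re-evaluates the pool whenever
 * management took any action:
 *
 *	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
 *		goto recheck;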
2079 */ 2080 static bool manage_workers(struct worker *worker) 2081 { 2082 struct worker_pool *pool = worker->pool; 2083 bool ret = false; 2084 2085 if (pool->flags & POOL_MANAGING_WORKERS) 2086 return ret; 2087 2088 pool->flags |= POOL_MANAGING_WORKERS; 2089 2090 /* 2091 * To simplify both worker management and CPU hotplug, hold off 2092 * management while hotplug is in progress. CPU hotplug path can't 2093 * grab %POOL_MANAGING_WORKERS to achieve this because that can 2094 * lead to idle worker depletion (all become busy thinking someone 2095 * else is managing) which in turn can result in deadlock under 2096 * extreme circumstances. Use @pool->assoc_mutex to synchronize 2097 * manager against CPU hotplug. 2098 * 2099 * assoc_mutex would always be free unless CPU hotplug is in 2100 * progress. trylock first without dropping @gcwq->lock. 2101 */ 2102 if (unlikely(!mutex_trylock(&pool->assoc_mutex))) { 2103 spin_unlock_irq(&pool->gcwq->lock); 2104 mutex_lock(&pool->assoc_mutex); 2105 /* 2106 * CPU hotplug could have happened while we were waiting 2107 * for assoc_mutex. Hotplug itself can't handle us 2108 * because manager isn't either on idle or busy list, and 2109 * @gcwq's state and ours could have deviated. 2110 * 2111 * As hotplug is now excluded via assoc_mutex, we can 2112 * simply try to bind. It will succeed or fail depending 2113 * on @gcwq's current state. Try it and adjust 2114 * %WORKER_UNBOUND accordingly. 2115 */ 2116 if (worker_maybe_bind_and_lock(worker)) 2117 worker->flags &= ~WORKER_UNBOUND; 2118 else 2119 worker->flags |= WORKER_UNBOUND; 2120 2121 ret = true; 2122 } 2123 2124 pool->flags &= ~POOL_MANAGE_WORKERS; 2125 2126 /* 2127 * Destroy and then create so that may_start_working() is true 2128 * on return. 2129 */ 2130 ret |= maybe_destroy_workers(pool); 2131 ret |= maybe_create_worker(pool); 2132 2133 pool->flags &= ~POOL_MANAGING_WORKERS; 2134 mutex_unlock(&pool->assoc_mutex); 2135 return ret; 2136 } 2137 2138 /** 2139 * process_one_work - process single work 2140 * @worker: self 2141 * @work: work to process 2142 * 2143 * Process @work. This function contains all the logics necessary to 2144 * process a single work including synchronization against and 2145 * interaction with other workers on the same cpu, queueing and 2146 * flushing. As long as context requirement is met, any worker can 2147 * call this function to process a work. 2148 * 2149 * CONTEXT: 2150 * spin_lock_irq(gcwq->lock) which is released and regrabbed. 2151 */ 2152 static void process_one_work(struct worker *worker, struct work_struct *work) 2153 __releases(&gcwq->lock) 2154 __acquires(&gcwq->lock) 2155 { 2156 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 2157 struct worker_pool *pool = worker->pool; 2158 struct global_cwq *gcwq = pool->gcwq; 2159 struct hlist_head *bwh = busy_worker_head(gcwq, work); 2160 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; 2161 work_func_t f = work->func; 2162 int work_color; 2163 struct worker *collision; 2164 #ifdef CONFIG_LOCKDEP 2165 /* 2166 * It is permissible to free the struct work_struct from 2167 * inside the function that is called from it, this we need to 2168 * take into account for lockdep too. To avoid bogus "held 2169 * lock freed" warnings as well as problems when looking into 2170 * work->lockdep_map, make a copy and use that here. 2171 */ 2172 struct lockdep_map lockdep_map; 2173 2174 lockdep_copy_map(&lockdep_map, &work->lockdep_map); 2175 #endif 2176 /* 2177 * Ensure we're on the correct CPU. 
DISASSOCIATED test is 2178 * necessary to avoid spurious warnings from rescuers servicing the 2179 * unbound or a disassociated gcwq. 2180 */ 2181 WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) && 2182 !(gcwq->flags & GCWQ_DISASSOCIATED) && 2183 raw_smp_processor_id() != gcwq->cpu); 2184 2185 /* 2186 * A single work shouldn't be executed concurrently by 2187 * multiple workers on a single cpu. Check whether anyone is 2188 * already processing the work. If so, defer the work to the 2189 * currently executing one. 2190 */ 2191 collision = __find_worker_executing_work(gcwq, bwh, work); 2192 if (unlikely(collision)) { 2193 move_linked_works(work, &collision->scheduled, NULL); 2194 return; 2195 } 2196 2197 /* claim and dequeue */ 2198 debug_work_deactivate(work); 2199 hlist_add_head(&worker->hentry, bwh); 2200 worker->current_work = work; 2201 worker->current_cwq = cwq; 2202 work_color = get_work_color(work); 2203 2204 list_del_init(&work->entry); 2205 2206 /* 2207 * CPU intensive works don't participate in concurrency 2208 * management. They're the scheduler's responsibility. 2209 */ 2210 if (unlikely(cpu_intensive)) 2211 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); 2212 2213 /* 2214 * Unbound gcwq isn't concurrency managed and work items should be 2215 * executed ASAP. Wake up another worker if necessary. 2216 */ 2217 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) 2218 wake_up_worker(pool); 2219 2220 /* 2221 * Record the last CPU and clear PENDING which should be the last 2222 * update to @work. Also, do this inside @gcwq->lock so that 2223 * PENDING and queued state changes happen together while IRQ is 2224 * disabled. 2225 */ 2226 set_work_cpu_and_clear_pending(work, gcwq->cpu); 2227 2228 spin_unlock_irq(&gcwq->lock); 2229 2230 lock_map_acquire_read(&cwq->wq->lockdep_map); 2231 lock_map_acquire(&lockdep_map); 2232 trace_workqueue_execute_start(work); 2233 f(work); 2234 /* 2235 * While we must be careful to not use "work" after this, the trace 2236 * point will only record its address. 2237 */ 2238 trace_workqueue_execute_end(work); 2239 lock_map_release(&lockdep_map); 2240 lock_map_release(&cwq->wq->lockdep_map); 2241 2242 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2243 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2244 " last function: %pf\n", 2245 current->comm, preempt_count(), task_pid_nr(current), f); 2246 debug_show_held_locks(current); 2247 dump_stack(); 2248 } 2249 2250 spin_lock_irq(&gcwq->lock); 2251 2252 /* clear cpu intensive status */ 2253 if (unlikely(cpu_intensive)) 2254 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2255 2256 /* we're done with it, release */ 2257 hlist_del_init(&worker->hentry); 2258 worker->current_work = NULL; 2259 worker->current_cwq = NULL; 2260 cwq_dec_nr_in_flight(cwq, work_color); 2261 } 2262 2263 /** 2264 * process_scheduled_works - process scheduled works 2265 * @worker: self 2266 * 2267 * Process all scheduled works. Please note that the scheduled list 2268 * may change while processing a work, so this function repeatedly 2269 * fetches a work from the top and executes it. 2270 * 2271 * CONTEXT: 2272 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 2273 * multiple times. 
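 *
 * Illustrative sketch (not part of the original kerneldoc): callers such as
 * worker_thread() and rescuer_thread() first splice the works they want
 * executed onto @worker->scheduled and then drain the list:
 *
 *	move_linked_works(work, &worker->scheduled, NULL);
 *	process_scheduled_works(worker);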
2274 */ 2275 static void process_scheduled_works(struct worker *worker) 2276 { 2277 while (!list_empty(&worker->scheduled)) { 2278 struct work_struct *work = list_first_entry(&worker->scheduled, 2279 struct work_struct, entry); 2280 process_one_work(worker, work); 2281 } 2282 } 2283 2284 /** 2285 * worker_thread - the worker thread function 2286 * @__worker: self 2287 * 2288 * The gcwq worker thread function. There's a single dynamic pool of 2289 * these per each cpu. These workers process all works regardless of 2290 * their specific target workqueue. The only exception is works which 2291 * belong to workqueues with a rescuer which will be explained in 2292 * rescuer_thread(). 2293 */ 2294 static int worker_thread(void *__worker) 2295 { 2296 struct worker *worker = __worker; 2297 struct worker_pool *pool = worker->pool; 2298 struct global_cwq *gcwq = pool->gcwq; 2299 2300 /* tell the scheduler that this is a workqueue worker */ 2301 worker->task->flags |= PF_WQ_WORKER; 2302 woke_up: 2303 spin_lock_irq(&gcwq->lock); 2304 2305 /* we are off idle list if destruction or rebind is requested */ 2306 if (unlikely(list_empty(&worker->entry))) { 2307 spin_unlock_irq(&gcwq->lock); 2308 2309 /* if DIE is set, destruction is requested */ 2310 if (worker->flags & WORKER_DIE) { 2311 worker->task->flags &= ~PF_WQ_WORKER; 2312 return 0; 2313 } 2314 2315 /* otherwise, rebind */ 2316 idle_worker_rebind(worker); 2317 goto woke_up; 2318 } 2319 2320 worker_leave_idle(worker); 2321 recheck: 2322 /* no more worker necessary? */ 2323 if (!need_more_worker(pool)) 2324 goto sleep; 2325 2326 /* do we need to manage? */ 2327 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2328 goto recheck; 2329 2330 /* 2331 * ->scheduled list can only be filled while a worker is 2332 * preparing to process a work or actually processing it. 2333 * Make sure nobody diddled with it while I was sleeping. 2334 */ 2335 BUG_ON(!list_empty(&worker->scheduled)); 2336 2337 /* 2338 * When control reaches this point, we're guaranteed to have 2339 * at least one idle worker or that someone else has already 2340 * assumed the manager role. 2341 */ 2342 worker_clr_flags(worker, WORKER_PREP); 2343 2344 do { 2345 struct work_struct *work = 2346 list_first_entry(&pool->worklist, 2347 struct work_struct, entry); 2348 2349 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { 2350 /* optimization path, not strictly necessary */ 2351 process_one_work(worker, work); 2352 if (unlikely(!list_empty(&worker->scheduled))) 2353 process_scheduled_works(worker); 2354 } else { 2355 move_linked_works(work, &worker->scheduled, NULL); 2356 process_scheduled_works(worker); 2357 } 2358 } while (keep_working(pool)); 2359 2360 worker_set_flags(worker, WORKER_PREP, false); 2361 sleep: 2362 if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker)) 2363 goto recheck; 2364 2365 /* 2366 * gcwq->lock is held and there's no work to process and no 2367 * need to manage, sleep. Workers are woken up only while 2368 * holding gcwq->lock or from local cpu, so setting the 2369 * current state before releasing gcwq->lock is enough to 2370 * prevent losing any event. 2371 */ 2372 worker_enter_idle(worker); 2373 __set_current_state(TASK_INTERRUPTIBLE); 2374 spin_unlock_irq(&gcwq->lock); 2375 schedule(); 2376 goto woke_up; 2377 } 2378 2379 /** 2380 * rescuer_thread - the rescuer thread function 2381 * @__wq: the associated workqueue 2382 * 2383 * Workqueue rescuer thread function. 
There's one rescuer for each 2384 * workqueue which has WQ_RESCUER set. 2385 * 2386 * Regular work processing on a gcwq may block trying to create a new 2387 * worker which uses GFP_KERNEL allocation which has slight chance of 2388 * developing into deadlock if some works currently on the same queue 2389 * need to be processed to satisfy the GFP_KERNEL allocation. This is 2390 * the problem rescuer solves. 2391 * 2392 * When such condition is possible, the gcwq summons rescuers of all 2393 * workqueues which have works queued on the gcwq and let them process 2394 * those works so that forward progress can be guaranteed. 2395 * 2396 * This should happen rarely. 2397 */ 2398 static int rescuer_thread(void *__wq) 2399 { 2400 struct workqueue_struct *wq = __wq; 2401 struct worker *rescuer = wq->rescuer; 2402 struct list_head *scheduled = &rescuer->scheduled; 2403 bool is_unbound = wq->flags & WQ_UNBOUND; 2404 unsigned int cpu; 2405 2406 set_user_nice(current, RESCUER_NICE_LEVEL); 2407 repeat: 2408 set_current_state(TASK_INTERRUPTIBLE); 2409 2410 if (kthread_should_stop()) 2411 return 0; 2412 2413 /* 2414 * See whether any cpu is asking for help. Unbounded 2415 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. 2416 */ 2417 for_each_mayday_cpu(cpu, wq->mayday_mask) { 2418 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; 2419 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); 2420 struct worker_pool *pool = cwq->pool; 2421 struct global_cwq *gcwq = pool->gcwq; 2422 struct work_struct *work, *n; 2423 2424 __set_current_state(TASK_RUNNING); 2425 mayday_clear_cpu(cpu, wq->mayday_mask); 2426 2427 /* migrate to the target cpu if possible */ 2428 rescuer->pool = pool; 2429 worker_maybe_bind_and_lock(rescuer); 2430 2431 /* 2432 * Slurp in all works issued via this workqueue and 2433 * process'em. 2434 */ 2435 BUG_ON(!list_empty(&rescuer->scheduled)); 2436 list_for_each_entry_safe(work, n, &pool->worklist, entry) 2437 if (get_work_cwq(work) == cwq) 2438 move_linked_works(work, scheduled, &n); 2439 2440 process_scheduled_works(rescuer); 2441 2442 /* 2443 * Leave this gcwq. If keep_working() is %true, notify a 2444 * regular worker; otherwise, we end up with 0 concurrency 2445 * and stalling the execution. 2446 */ 2447 if (keep_working(pool)) 2448 wake_up_worker(pool); 2449 2450 spin_unlock_irq(&gcwq->lock); 2451 } 2452 2453 schedule(); 2454 goto repeat; 2455 } 2456 2457 struct wq_barrier { 2458 struct work_struct work; 2459 struct completion done; 2460 }; 2461 2462 static void wq_barrier_func(struct work_struct *work) 2463 { 2464 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2465 complete(&barr->done); 2466 } 2467 2468 /** 2469 * insert_wq_barrier - insert a barrier work 2470 * @cwq: cwq to insert barrier into 2471 * @barr: wq_barrier to insert 2472 * @target: target work to attach @barr to 2473 * @worker: worker currently executing @target, NULL if @target is not executing 2474 * 2475 * @barr is linked to @target such that @barr is completed only after 2476 * @target finishes execution. Please note that the ordering 2477 * guarantee is observed only with respect to @target and on the local 2478 * cpu. 2479 * 2480 * Currently, a queued barrier can't be canceled. This is because 2481 * try_to_grab_pending() can't determine whether the work to be 2482 * grabbed is at the head of the queue and thus can't clear LINKED 2483 * flag of the previous work while there must be a valid next work 2484 * after a work with LINKED flag set. 
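 *
 * Illustrative sketch (not part of the original kerneldoc): flush_work()
 * below uses this via start_flush_work() and then just waits for the
 * on-stack barrier to complete:
 *
 *	if (start_flush_work(work, &barr)) {
 *		wait_for_completion(&barr.done);
 *		destroy_work_on_stack(&barr.work);
 *	}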
2485 * 2486 * Note that when @worker is non-NULL, @target may be modified 2487 * underneath us, so we can't reliably determine cwq from @target. 2488 * 2489 * CONTEXT: 2490 * spin_lock_irq(gcwq->lock). 2491 */ 2492 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 2493 struct wq_barrier *barr, 2494 struct work_struct *target, struct worker *worker) 2495 { 2496 struct list_head *head; 2497 unsigned int linked = 0; 2498 2499 /* 2500 * debugobject calls are safe here even with gcwq->lock locked 2501 * as we know for sure that this will not trigger any of the 2502 * checks and call back into the fixup functions where we 2503 * might deadlock. 2504 */ 2505 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 2506 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 2507 init_completion(&barr->done); 2508 2509 /* 2510 * If @target is currently being executed, schedule the 2511 * barrier to the worker; otherwise, put it after @target. 2512 */ 2513 if (worker) 2514 head = worker->scheduled.next; 2515 else { 2516 unsigned long *bits = work_data_bits(target); 2517 2518 head = target->entry.next; 2519 /* there can already be other linked works, inherit and set */ 2520 linked = *bits & WORK_STRUCT_LINKED; 2521 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 2522 } 2523 2524 debug_work_activate(&barr->work); 2525 insert_work(cwq, &barr->work, head, 2526 work_color_to_flags(WORK_NO_COLOR) | linked); 2527 } 2528 2529 /** 2530 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing 2531 * @wq: workqueue being flushed 2532 * @flush_color: new flush color, < 0 for no-op 2533 * @work_color: new work color, < 0 for no-op 2534 * 2535 * Prepare cwqs for workqueue flushing. 2536 * 2537 * If @flush_color is non-negative, flush_color on all cwqs should be 2538 * -1. If no cwq has in-flight commands at the specified color, all 2539 * cwq->flush_color's stay at -1 and %false is returned. If any cwq 2540 * has in flight commands, its cwq->flush_color is set to 2541 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq 2542 * wakeup logic is armed and %true is returned. 2543 * 2544 * The caller should have initialized @wq->first_flusher prior to 2545 * calling this function with non-negative @flush_color. If 2546 * @flush_color is negative, no flush color update is done and %false 2547 * is returned. 2548 * 2549 * If @work_color is non-negative, all cwqs should have the same 2550 * work_color which is previous to @work_color and all will be 2551 * advanced to @work_color. 2552 * 2553 * CONTEXT: 2554 * mutex_lock(wq->flush_mutex). 2555 * 2556 * RETURNS: 2557 * %true if @flush_color >= 0 and there's something to flush. %false 2558 * otherwise. 
2559 */ 2560 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, 2561 int flush_color, int work_color) 2562 { 2563 bool wait = false; 2564 unsigned int cpu; 2565 2566 if (flush_color >= 0) { 2567 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); 2568 atomic_set(&wq->nr_cwqs_to_flush, 1); 2569 } 2570 2571 for_each_cwq_cpu(cpu, wq) { 2572 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2573 struct global_cwq *gcwq = cwq->pool->gcwq; 2574 2575 spin_lock_irq(&gcwq->lock); 2576 2577 if (flush_color >= 0) { 2578 BUG_ON(cwq->flush_color != -1); 2579 2580 if (cwq->nr_in_flight[flush_color]) { 2581 cwq->flush_color = flush_color; 2582 atomic_inc(&wq->nr_cwqs_to_flush); 2583 wait = true; 2584 } 2585 } 2586 2587 if (work_color >= 0) { 2588 BUG_ON(work_color != work_next_color(cwq->work_color)); 2589 cwq->work_color = work_color; 2590 } 2591 2592 spin_unlock_irq(&gcwq->lock); 2593 } 2594 2595 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) 2596 complete(&wq->first_flusher->done); 2597 2598 return wait; 2599 } 2600 2601 /** 2602 * flush_workqueue - ensure that any scheduled work has run to completion. 2603 * @wq: workqueue to flush 2604 * 2605 * Forces execution of the workqueue and blocks until its completion. 2606 * This is typically used in driver shutdown handlers. 2607 * 2608 * We sleep until all works which were queued on entry have been handled, 2609 * but we are not livelocked by new incoming ones. 2610 */ 2611 void flush_workqueue(struct workqueue_struct *wq) 2612 { 2613 struct wq_flusher this_flusher = { 2614 .list = LIST_HEAD_INIT(this_flusher.list), 2615 .flush_color = -1, 2616 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), 2617 }; 2618 int next_color; 2619 2620 lock_map_acquire(&wq->lockdep_map); 2621 lock_map_release(&wq->lockdep_map); 2622 2623 mutex_lock(&wq->flush_mutex); 2624 2625 /* 2626 * Start-to-wait phase 2627 */ 2628 next_color = work_next_color(wq->work_color); 2629 2630 if (next_color != wq->flush_color) { 2631 /* 2632 * Color space is not full. The current work_color 2633 * becomes our flush_color and work_color is advanced 2634 * by one. 2635 */ 2636 BUG_ON(!list_empty(&wq->flusher_overflow)); 2637 this_flusher.flush_color = wq->work_color; 2638 wq->work_color = next_color; 2639 2640 if (!wq->first_flusher) { 2641 /* no flush in progress, become the first flusher */ 2642 BUG_ON(wq->flush_color != this_flusher.flush_color); 2643 2644 wq->first_flusher = &this_flusher; 2645 2646 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, 2647 wq->work_color)) { 2648 /* nothing to flush, done */ 2649 wq->flush_color = next_color; 2650 wq->first_flusher = NULL; 2651 goto out_unlock; 2652 } 2653 } else { 2654 /* wait in queue */ 2655 BUG_ON(wq->flush_color == this_flusher.flush_color); 2656 list_add_tail(&this_flusher.list, &wq->flusher_queue); 2657 flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2658 } 2659 } else { 2660 /* 2661 * Oops, color space is full, wait on overflow queue. 2662 * The next flush completion will assign us 2663 * flush_color and transfer to flusher_queue. 2664 */ 2665 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 2666 } 2667 2668 mutex_unlock(&wq->flush_mutex); 2669 2670 wait_for_completion(&this_flusher.done); 2671 2672 /* 2673 * Wake-up-and-cascade phase 2674 * 2675 * First flushers are responsible for cascading flushes and 2676 * handling overflow. Non-first flushers can simply return. 
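 *
 * (Illustrative aside, not part of the original comment: none of this
 * cascading is visible to callers.  A driver shutdown path with a
 * hypothetical workqueue simply does
 *
 *	flush_workqueue(my_wq);
 *
 * and is guaranteed that every work item queued before the call has
 * finished executing by the time it returns.)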
2677 */ 2678 if (wq->first_flusher != &this_flusher) 2679 return; 2680 2681 mutex_lock(&wq->flush_mutex); 2682 2683 /* we might have raced, check again with mutex held */ 2684 if (wq->first_flusher != &this_flusher) 2685 goto out_unlock; 2686 2687 wq->first_flusher = NULL; 2688 2689 BUG_ON(!list_empty(&this_flusher.list)); 2690 BUG_ON(wq->flush_color != this_flusher.flush_color); 2691 2692 while (true) { 2693 struct wq_flusher *next, *tmp; 2694 2695 /* complete all the flushers sharing the current flush color */ 2696 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 2697 if (next->flush_color != wq->flush_color) 2698 break; 2699 list_del_init(&next->list); 2700 complete(&next->done); 2701 } 2702 2703 BUG_ON(!list_empty(&wq->flusher_overflow) && 2704 wq->flush_color != work_next_color(wq->work_color)); 2705 2706 /* this flush_color is finished, advance by one */ 2707 wq->flush_color = work_next_color(wq->flush_color); 2708 2709 /* one color has been freed, handle overflow queue */ 2710 if (!list_empty(&wq->flusher_overflow)) { 2711 /* 2712 * Assign the same color to all overflowed 2713 * flushers, advance work_color and append to 2714 * flusher_queue. This is the start-to-wait 2715 * phase for these overflowed flushers. 2716 */ 2717 list_for_each_entry(tmp, &wq->flusher_overflow, list) 2718 tmp->flush_color = wq->work_color; 2719 2720 wq->work_color = work_next_color(wq->work_color); 2721 2722 list_splice_tail_init(&wq->flusher_overflow, 2723 &wq->flusher_queue); 2724 flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2725 } 2726 2727 if (list_empty(&wq->flusher_queue)) { 2728 BUG_ON(wq->flush_color != wq->work_color); 2729 break; 2730 } 2731 2732 /* 2733 * Need to flush more colors. Make the next flusher 2734 * the new first flusher and arm cwqs. 2735 */ 2736 BUG_ON(wq->flush_color == wq->work_color); 2737 BUG_ON(wq->flush_color != next->flush_color); 2738 2739 list_del_init(&next->list); 2740 wq->first_flusher = next; 2741 2742 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) 2743 break; 2744 2745 /* 2746 * Meh... this color is already done, clear first 2747 * flusher and repeat cascading. 2748 */ 2749 wq->first_flusher = NULL; 2750 } 2751 2752 out_unlock: 2753 mutex_unlock(&wq->flush_mutex); 2754 } 2755 EXPORT_SYMBOL_GPL(flush_workqueue); 2756 2757 /** 2758 * drain_workqueue - drain a workqueue 2759 * @wq: workqueue to drain 2760 * 2761 * Wait until the workqueue becomes empty. While draining is in progress, 2762 * only chain queueing is allowed. IOW, only currently pending or running 2763 * work items on @wq can queue further work items on it. @wq is flushed 2764 * repeatedly until it becomes empty. The number of flushes is determined 2765 * by the depth of chaining and should be relatively short. Whine if it 2766 * takes too long. 2767 */ 2768 void drain_workqueue(struct workqueue_struct *wq) 2769 { 2770 unsigned int flush_cnt = 0; 2771 unsigned int cpu; 2772 2773 /* 2774 * __queue_work() needs to test whether there are drainers; it is much 2775 * hotter than drain_workqueue() and already looks at @wq->flags. 2776 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
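 *
 * (Illustrative aside, not part of the original comment: "chain queueing"
 * means a work item already running on @wq may requeue work on the same
 * @wq while the drain is in progress, e.g. a hypothetical
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		if (more_to_do())
 *			queue_work(my_wq, work);
 *	}
 *
 * whereas queueing on @wq from any unrelated context is not allowed
 * until draining completes.)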
2777 */ 2778 spin_lock(&workqueue_lock); 2779 if (!wq->nr_drainers++) 2780 wq->flags |= WQ_DRAINING; 2781 spin_unlock(&workqueue_lock); 2782 reflush: 2783 flush_workqueue(wq); 2784 2785 for_each_cwq_cpu(cpu, wq) { 2786 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2787 bool drained; 2788 2789 spin_lock_irq(&cwq->pool->gcwq->lock); 2790 drained = !cwq->nr_active && list_empty(&cwq->delayed_works); 2791 spin_unlock_irq(&cwq->pool->gcwq->lock); 2792 2793 if (drained) 2794 continue; 2795 2796 if (++flush_cnt == 10 || 2797 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 2798 pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n", 2799 wq->name, flush_cnt); 2800 goto reflush; 2801 } 2802 2803 spin_lock(&workqueue_lock); 2804 if (!--wq->nr_drainers) 2805 wq->flags &= ~WQ_DRAINING; 2806 spin_unlock(&workqueue_lock); 2807 } 2808 EXPORT_SYMBOL_GPL(drain_workqueue); 2809 2810 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 2811 { 2812 struct worker *worker = NULL; 2813 struct global_cwq *gcwq; 2814 struct cpu_workqueue_struct *cwq; 2815 2816 might_sleep(); 2817 gcwq = get_work_gcwq(work); 2818 if (!gcwq) 2819 return false; 2820 2821 spin_lock_irq(&gcwq->lock); 2822 if (!list_empty(&work->entry)) { 2823 /* 2824 * See the comment near try_to_grab_pending()->smp_rmb(). 2825 * If it was re-queued to a different gcwq under us, we 2826 * are not going to wait. 2827 */ 2828 smp_rmb(); 2829 cwq = get_work_cwq(work); 2830 if (unlikely(!cwq || gcwq != cwq->pool->gcwq)) 2831 goto already_gone; 2832 } else { 2833 worker = find_worker_executing_work(gcwq, work); 2834 if (!worker) 2835 goto already_gone; 2836 cwq = worker->current_cwq; 2837 } 2838 2839 insert_wq_barrier(cwq, barr, work, worker); 2840 spin_unlock_irq(&gcwq->lock); 2841 2842 /* 2843 * If @max_active is 1 or rescuer is in use, flushing another work 2844 * item on the same workqueue may lead to deadlock. Make sure the 2845 * flusher is not running on the same workqueue by verifying write 2846 * access. 2847 */ 2848 if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) 2849 lock_map_acquire(&cwq->wq->lockdep_map); 2850 else 2851 lock_map_acquire_read(&cwq->wq->lockdep_map); 2852 lock_map_release(&cwq->wq->lockdep_map); 2853 2854 return true; 2855 already_gone: 2856 spin_unlock_irq(&gcwq->lock); 2857 return false; 2858 } 2859 2860 /** 2861 * flush_work - wait for a work to finish executing the last queueing instance 2862 * @work: the work to flush 2863 * 2864 * Wait until @work has finished execution. @work is guaranteed to be idle 2865 * on return if it hasn't been requeued since flush started. 2866 * 2867 * RETURNS: 2868 * %true if flush_work() waited for the work to finish execution, 2869 * %false if it was already idle. 2870 */ 2871 bool flush_work(struct work_struct *work) 2872 { 2873 struct wq_barrier barr; 2874 2875 lock_map_acquire(&work->lockdep_map); 2876 lock_map_release(&work->lockdep_map); 2877 2878 if (start_flush_work(work, &barr)) { 2879 wait_for_completion(&barr.done); 2880 destroy_work_on_stack(&barr.work); 2881 return true; 2882 } else { 2883 return false; 2884 } 2885 } 2886 EXPORT_SYMBOL_GPL(flush_work); 2887 2888 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 2889 { 2890 unsigned long flags; 2891 int ret; 2892 2893 do { 2894 ret = try_to_grab_pending(work, is_dwork, &flags); 2895 /* 2896 * If someone else is canceling, wait for the same event it 2897 * would be waiting for before retrying. 
2898 */ 2899 if (unlikely(ret == -ENOENT)) 2900 flush_work(work); 2901 } while (unlikely(ret < 0)); 2902 2903 /* tell other tasks trying to grab @work to back off */ 2904 mark_work_canceling(work); 2905 local_irq_restore(flags); 2906 2907 flush_work(work); 2908 clear_work_data(work); 2909 return ret; 2910 } 2911 2912 /** 2913 * cancel_work_sync - cancel a work and wait for it to finish 2914 * @work: the work to cancel 2915 * 2916 * Cancel @work and wait for its execution to finish. This function 2917 * can be used even if the work re-queues itself or migrates to 2918 * another workqueue. On return from this function, @work is 2919 * guaranteed to be not pending or executing on any CPU. 2920 * 2921 * cancel_work_sync(&delayed_work->work) must not be used for 2922 * delayed_work's. Use cancel_delayed_work_sync() instead. 2923 * 2924 * The caller must ensure that the workqueue on which @work was last 2925 * queued can't be destroyed before this function returns. 2926 * 2927 * RETURNS: 2928 * %true if @work was pending, %false otherwise. 2929 */ 2930 bool cancel_work_sync(struct work_struct *work) 2931 { 2932 return __cancel_work_timer(work, false); 2933 } 2934 EXPORT_SYMBOL_GPL(cancel_work_sync); 2935 2936 /** 2937 * flush_delayed_work - wait for a dwork to finish executing the last queueing 2938 * @dwork: the delayed work to flush 2939 * 2940 * Delayed timer is cancelled and the pending work is queued for 2941 * immediate execution. Like flush_work(), this function only 2942 * considers the last queueing instance of @dwork. 2943 * 2944 * RETURNS: 2945 * %true if flush_work() waited for the work to finish execution, 2946 * %false if it was already idle. 2947 */ 2948 bool flush_delayed_work(struct delayed_work *dwork) 2949 { 2950 local_irq_disable(); 2951 if (del_timer_sync(&dwork->timer)) 2952 __queue_work(dwork->cpu, 2953 get_work_cwq(&dwork->work)->wq, &dwork->work); 2954 local_irq_enable(); 2955 return flush_work(&dwork->work); 2956 } 2957 EXPORT_SYMBOL(flush_delayed_work); 2958 2959 /** 2960 * cancel_delayed_work - cancel a delayed work 2961 * @dwork: delayed_work to cancel 2962 * 2963 * Kill off a pending delayed_work. Returns %true if @dwork was pending 2964 * and canceled; %false if wasn't pending. Note that the work callback 2965 * function may still be running on return, unless it returns %true and the 2966 * work doesn't re-arm itself. Explicitly flush or use 2967 * cancel_delayed_work_sync() to wait on it. 2968 * 2969 * This function is safe to call from any context including IRQ handler. 2970 */ 2971 bool cancel_delayed_work(struct delayed_work *dwork) 2972 { 2973 unsigned long flags; 2974 int ret; 2975 2976 do { 2977 ret = try_to_grab_pending(&dwork->work, true, &flags); 2978 } while (unlikely(ret == -EAGAIN)); 2979 2980 if (unlikely(ret < 0)) 2981 return false; 2982 2983 set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work)); 2984 local_irq_restore(flags); 2985 return ret; 2986 } 2987 EXPORT_SYMBOL(cancel_delayed_work); 2988 2989 /** 2990 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 2991 * @dwork: the delayed work cancel 2992 * 2993 * This is cancel_work_sync() for delayed works. 2994 * 2995 * RETURNS: 2996 * %true if @dwork was pending, %false otherwise. 
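 *
 * Illustrative sketch (not part of the original kerneldoc), using
 * hypothetical names: tearing down a self-rearming poll work.  The call
 * both kills the timer and waits for any in-flight execution, so the
 * callback cannot run after it returns:
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_delayed_work_sync(&dev->poll_dwork);
 *		kfree(dev);
 *	}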
2997 */ 2998 bool cancel_delayed_work_sync(struct delayed_work *dwork) 2999 { 3000 return __cancel_work_timer(&dwork->work, true); 3001 } 3002 EXPORT_SYMBOL(cancel_delayed_work_sync); 3003 3004 /** 3005 * schedule_work_on - put work task on a specific cpu 3006 * @cpu: cpu to put the work task on 3007 * @work: job to be done 3008 * 3009 * This puts a job on a specific cpu 3010 */ 3011 bool schedule_work_on(int cpu, struct work_struct *work) 3012 { 3013 return queue_work_on(cpu, system_wq, work); 3014 } 3015 EXPORT_SYMBOL(schedule_work_on); 3016 3017 /** 3018 * schedule_work - put work task in global workqueue 3019 * @work: job to be done 3020 * 3021 * Returns %false if @work was already on the kernel-global workqueue and 3022 * %true otherwise. 3023 * 3024 * This puts a job in the kernel-global workqueue if it was not already 3025 * queued and leaves it in the same position on the kernel-global 3026 * workqueue otherwise. 3027 */ 3028 bool schedule_work(struct work_struct *work) 3029 { 3030 return queue_work(system_wq, work); 3031 } 3032 EXPORT_SYMBOL(schedule_work); 3033 3034 /** 3035 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 3036 * @cpu: cpu to use 3037 * @dwork: job to be done 3038 * @delay: number of jiffies to wait 3039 * 3040 * After waiting for a given time this puts a job in the kernel-global 3041 * workqueue on the specified CPU. 3042 */ 3043 bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 3044 unsigned long delay) 3045 { 3046 return queue_delayed_work_on(cpu, system_wq, dwork, delay); 3047 } 3048 EXPORT_SYMBOL(schedule_delayed_work_on); 3049 3050 /** 3051 * schedule_delayed_work - put work task in global workqueue after delay 3052 * @dwork: job to be done 3053 * @delay: number of jiffies to wait or 0 for immediate execution 3054 * 3055 * After waiting for a given time this puts a job in the kernel-global 3056 * workqueue. 3057 */ 3058 bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) 3059 { 3060 return queue_delayed_work(system_wq, dwork, delay); 3061 } 3062 EXPORT_SYMBOL(schedule_delayed_work); 3063 3064 /** 3065 * schedule_on_each_cpu - execute a function synchronously on each online CPU 3066 * @func: the function to call 3067 * 3068 * schedule_on_each_cpu() executes @func on each online CPU using the 3069 * system workqueue and blocks until all CPUs have completed. 3070 * schedule_on_each_cpu() is very slow. 3071 * 3072 * RETURNS: 3073 * 0 on success, -errno on failure. 3074 */ 3075 int schedule_on_each_cpu(work_func_t func) 3076 { 3077 int cpu; 3078 struct work_struct __percpu *works; 3079 3080 works = alloc_percpu(struct work_struct); 3081 if (!works) 3082 return -ENOMEM; 3083 3084 get_online_cpus(); 3085 3086 for_each_online_cpu(cpu) { 3087 struct work_struct *work = per_cpu_ptr(works, cpu); 3088 3089 INIT_WORK(work, func); 3090 schedule_work_on(cpu, work); 3091 } 3092 3093 for_each_online_cpu(cpu) 3094 flush_work(per_cpu_ptr(works, cpu)); 3095 3096 put_online_cpus(); 3097 free_percpu(works); 3098 return 0; 3099 } 3100 3101 /** 3102 * flush_scheduled_work - ensure that any scheduled work has run to completion. 3103 * 3104 * Forces execution of the kernel-global workqueue and blocks until its 3105 * completion. 3106 * 3107 * Think twice before calling this function! It's very easy to get into 3108 * trouble if you don't take great care. 
Either of the following situations 3109 * will lead to deadlock: 3110 * 3111 * One of the work items currently on the workqueue needs to acquire 3112 * a lock held by your code or its caller. 3113 * 3114 * Your code is running in the context of a work routine. 3115 * 3116 * They will be detected by lockdep when they occur, but the first might not 3117 * occur very often. It depends on what work items are on the workqueue and 3118 * what locks they need, which you have no control over. 3119 * 3120 * In most situations flushing the entire workqueue is overkill; you merely 3121 * need to know that a particular work item isn't queued and isn't running. 3122 * In such cases you should use cancel_delayed_work_sync() or 3123 * cancel_work_sync() instead. 3124 */ 3125 void flush_scheduled_work(void) 3126 { 3127 flush_workqueue(system_wq); 3128 } 3129 EXPORT_SYMBOL(flush_scheduled_work); 3130 3131 /** 3132 * execute_in_process_context - reliably execute the routine with user context 3133 * @fn: the function to execute 3134 * @ew: guaranteed storage for the execute work structure (must 3135 * be available when the work executes) 3136 * 3137 * Executes the function immediately if process context is available, 3138 * otherwise schedules the function for delayed execution. 3139 * 3140 * Returns: 0 - function was executed 3141 * 1 - function was scheduled for execution 3142 */ 3143 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 3144 { 3145 if (!in_interrupt()) { 3146 fn(&ew->work); 3147 return 0; 3148 } 3149 3150 INIT_WORK(&ew->work, fn); 3151 schedule_work(&ew->work); 3152 3153 return 1; 3154 } 3155 EXPORT_SYMBOL_GPL(execute_in_process_context); 3156 3157 int keventd_up(void) 3158 { 3159 return system_wq != NULL; 3160 } 3161 3162 static int alloc_cwqs(struct workqueue_struct *wq) 3163 { 3164 /* 3165 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. 3166 * Make sure that the alignment isn't lower than that of 3167 * unsigned long long. 3168 */ 3169 const size_t size = sizeof(struct cpu_workqueue_struct); 3170 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, 3171 __alignof__(unsigned long long)); 3172 3173 if (!(wq->flags & WQ_UNBOUND)) 3174 wq->cpu_wq.pcpu = __alloc_percpu(size, align); 3175 else { 3176 void *ptr; 3177 3178 /* 3179 * Allocate enough room to align cwq and put an extra 3180 * pointer at the end pointing back to the originally 3181 * allocated pointer which will be used for free. 3182 */ 3183 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); 3184 if (ptr) { 3185 wq->cpu_wq.single = PTR_ALIGN(ptr, align); 3186 *(void **)(wq->cpu_wq.single + 1) = ptr; 3187 } 3188 } 3189 3190 /* just in case, make sure it's actually aligned */ 3191 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); 3192 return wq->cpu_wq.v ? 0 : -ENOMEM; 3193 } 3194 3195 static void free_cwqs(struct workqueue_struct *wq) 3196 { 3197 if (!(wq->flags & WQ_UNBOUND)) 3198 free_percpu(wq->cpu_wq.pcpu); 3199 else if (wq->cpu_wq.single) { 3200 /* the pointer to free is stored right after the cwq */ 3201 kfree(*(void **)(wq->cpu_wq.single + 1)); 3202 } 3203 } 3204 3205 static int wq_clamp_max_active(int max_active, unsigned int flags, 3206 const char *name) 3207 { 3208 int lim = flags & WQ_UNBOUND ? 
WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 3209 3210 if (max_active < 1 || max_active > lim) 3211 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 3212 max_active, name, 1, lim); 3213 3214 return clamp_val(max_active, 1, lim); 3215 } 3216 3217 struct workqueue_struct *__alloc_workqueue_key(const char *fmt, 3218 unsigned int flags, 3219 int max_active, 3220 struct lock_class_key *key, 3221 const char *lock_name, ...) 3222 { 3223 va_list args, args1; 3224 struct workqueue_struct *wq; 3225 unsigned int cpu; 3226 size_t namelen; 3227 3228 /* determine namelen, allocate wq and format name */ 3229 va_start(args, lock_name); 3230 va_copy(args1, args); 3231 namelen = vsnprintf(NULL, 0, fmt, args) + 1; 3232 3233 wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL); 3234 if (!wq) 3235 goto err; 3236 3237 vsnprintf(wq->name, namelen, fmt, args1); 3238 va_end(args); 3239 va_end(args1); 3240 3241 /* 3242 * Workqueues which may be used during memory reclaim should 3243 * have a rescuer to guarantee forward progress. 3244 */ 3245 if (flags & WQ_MEM_RECLAIM) 3246 flags |= WQ_RESCUER; 3247 3248 max_active = max_active ?: WQ_DFL_ACTIVE; 3249 max_active = wq_clamp_max_active(max_active, flags, wq->name); 3250 3251 /* init wq */ 3252 wq->flags = flags; 3253 wq->saved_max_active = max_active; 3254 mutex_init(&wq->flush_mutex); 3255 atomic_set(&wq->nr_cwqs_to_flush, 0); 3256 INIT_LIST_HEAD(&wq->flusher_queue); 3257 INIT_LIST_HEAD(&wq->flusher_overflow); 3258 3259 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 3260 INIT_LIST_HEAD(&wq->list); 3261 3262 if (alloc_cwqs(wq) < 0) 3263 goto err; 3264 3265 for_each_cwq_cpu(cpu, wq) { 3266 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3267 struct global_cwq *gcwq = get_gcwq(cpu); 3268 int pool_idx = (bool)(flags & WQ_HIGHPRI); 3269 3270 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); 3271 cwq->pool = &gcwq->pools[pool_idx]; 3272 cwq->wq = wq; 3273 cwq->flush_color = -1; 3274 cwq->max_active = max_active; 3275 INIT_LIST_HEAD(&cwq->delayed_works); 3276 } 3277 3278 if (flags & WQ_RESCUER) { 3279 struct worker *rescuer; 3280 3281 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL)) 3282 goto err; 3283 3284 wq->rescuer = rescuer = alloc_worker(); 3285 if (!rescuer) 3286 goto err; 3287 3288 rescuer->task = kthread_create(rescuer_thread, wq, "%s", 3289 wq->name); 3290 if (IS_ERR(rescuer->task)) 3291 goto err; 3292 3293 rescuer->task->flags |= PF_THREAD_BOUND; 3294 wake_up_process(rescuer->task); 3295 } 3296 3297 /* 3298 * workqueue_lock protects global freeze state and workqueues 3299 * list. Grab it, set max_active accordingly and add the new 3300 * workqueue to workqueues list. 3301 */ 3302 spin_lock(&workqueue_lock); 3303 3304 if (workqueue_freezing && wq->flags & WQ_FREEZABLE) 3305 for_each_cwq_cpu(cpu, wq) 3306 get_cwq(cpu, wq)->max_active = 0; 3307 3308 list_add(&wq->list, &workqueues); 3309 3310 spin_unlock(&workqueue_lock); 3311 3312 return wq; 3313 err: 3314 if (wq) { 3315 free_cwqs(wq); 3316 free_mayday_mask(wq->mayday_mask); 3317 kfree(wq->rescuer); 3318 kfree(wq); 3319 } 3320 return NULL; 3321 } 3322 EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 3323 3324 /** 3325 * destroy_workqueue - safely terminate a workqueue 3326 * @wq: target workqueue 3327 * 3328 * Safely destroy a workqueue. All work currently pending will be done first. 
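 *
 * Illustrative sketch (not part of the original kerneldoc), using
 * hypothetical names: the usual create/use/destroy life cycle of a
 * driver-private workqueue.  Because destruction drains the queue first,
 * no explicit flush is needed beforehand:
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	queue_work(my_wq, &my_work);
 *
 *	destroy_workqueue(my_wq);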
3329 */ 3330 void destroy_workqueue(struct workqueue_struct *wq) 3331 { 3332 unsigned int cpu; 3333 3334 /* drain it before proceeding with destruction */ 3335 drain_workqueue(wq); 3336 3337 /* 3338 * wq list is used to freeze wq, remove from list after 3339 * flushing is complete in case freeze races us. 3340 */ 3341 spin_lock(&workqueue_lock); 3342 list_del(&wq->list); 3343 spin_unlock(&workqueue_lock); 3344 3345 /* sanity check */ 3346 for_each_cwq_cpu(cpu, wq) { 3347 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3348 int i; 3349 3350 for (i = 0; i < WORK_NR_COLORS; i++) 3351 BUG_ON(cwq->nr_in_flight[i]); 3352 BUG_ON(cwq->nr_active); 3353 BUG_ON(!list_empty(&cwq->delayed_works)); 3354 } 3355 3356 if (wq->flags & WQ_RESCUER) { 3357 kthread_stop(wq->rescuer->task); 3358 free_mayday_mask(wq->mayday_mask); 3359 kfree(wq->rescuer); 3360 } 3361 3362 free_cwqs(wq); 3363 kfree(wq); 3364 } 3365 EXPORT_SYMBOL_GPL(destroy_workqueue); 3366 3367 /** 3368 * cwq_set_max_active - adjust max_active of a cwq 3369 * @cwq: target cpu_workqueue_struct 3370 * @max_active: new max_active value. 3371 * 3372 * Set @cwq->max_active to @max_active and activate delayed works if 3373 * increased. 3374 * 3375 * CONTEXT: 3376 * spin_lock_irq(gcwq->lock). 3377 */ 3378 static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active) 3379 { 3380 cwq->max_active = max_active; 3381 3382 while (!list_empty(&cwq->delayed_works) && 3383 cwq->nr_active < cwq->max_active) 3384 cwq_activate_first_delayed(cwq); 3385 } 3386 3387 /** 3388 * workqueue_set_max_active - adjust max_active of a workqueue 3389 * @wq: target workqueue 3390 * @max_active: new max_active value. 3391 * 3392 * Set max_active of @wq to @max_active. 3393 * 3394 * CONTEXT: 3395 * Don't call from IRQ context. 3396 */ 3397 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 3398 { 3399 unsigned int cpu; 3400 3401 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 3402 3403 spin_lock(&workqueue_lock); 3404 3405 wq->saved_max_active = max_active; 3406 3407 for_each_cwq_cpu(cpu, wq) { 3408 struct global_cwq *gcwq = get_gcwq(cpu); 3409 3410 spin_lock_irq(&gcwq->lock); 3411 3412 if (!(wq->flags & WQ_FREEZABLE) || 3413 !(gcwq->flags & GCWQ_FREEZING)) 3414 cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active); 3415 3416 spin_unlock_irq(&gcwq->lock); 3417 } 3418 3419 spin_unlock(&workqueue_lock); 3420 } 3421 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 3422 3423 /** 3424 * workqueue_congested - test whether a workqueue is congested 3425 * @cpu: CPU in question 3426 * @wq: target workqueue 3427 * 3428 * Test whether @wq's cpu workqueue for @cpu is congested. There is 3429 * no synchronization around this function and the test result is 3430 * unreliable and only useful as advisory hints or for debugging. 3431 * 3432 * RETURNS: 3433 * %true if congested, %false otherwise. 3434 */ 3435 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) 3436 { 3437 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3438 3439 return !list_empty(&cwq->delayed_works); 3440 } 3441 EXPORT_SYMBOL_GPL(workqueue_congested); 3442 3443 /** 3444 * work_cpu - return the last known associated cpu for @work 3445 * @work: the work of interest 3446 * 3447 * RETURNS: 3448 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise. 3449 */ 3450 unsigned int work_cpu(struct work_struct *work) 3451 { 3452 struct global_cwq *gcwq = get_work_gcwq(work); 3453 3454 return gcwq ? 
gcwq->cpu : WORK_CPU_NONE; 3455 } 3456 EXPORT_SYMBOL_GPL(work_cpu); 3457 3458 /** 3459 * work_busy - test whether a work is currently pending or running 3460 * @work: the work to be tested 3461 * 3462 * Test whether @work is currently pending or running. There is no 3463 * synchronization around this function and the test result is 3464 * unreliable and only useful as advisory hints or for debugging. 3465 * Especially for reentrant wqs, the pending state might hide the 3466 * running state. 3467 * 3468 * RETURNS: 3469 * OR'd bitmask of WORK_BUSY_* bits. 3470 */ 3471 unsigned int work_busy(struct work_struct *work) 3472 { 3473 struct global_cwq *gcwq = get_work_gcwq(work); 3474 unsigned long flags; 3475 unsigned int ret = 0; 3476 3477 if (!gcwq) 3478 return false; 3479 3480 spin_lock_irqsave(&gcwq->lock, flags); 3481 3482 if (work_pending(work)) 3483 ret |= WORK_BUSY_PENDING; 3484 if (find_worker_executing_work(gcwq, work)) 3485 ret |= WORK_BUSY_RUNNING; 3486 3487 spin_unlock_irqrestore(&gcwq->lock, flags); 3488 3489 return ret; 3490 } 3491 EXPORT_SYMBOL_GPL(work_busy); 3492 3493 /* 3494 * CPU hotplug. 3495 * 3496 * There are two challenges in supporting CPU hotplug. Firstly, there 3497 * are a lot of assumptions on strong associations among work, cwq and 3498 * gcwq which make migrating pending and scheduled works very 3499 * difficult to implement without impacting hot paths. Secondly, 3500 * gcwqs serve mix of short, long and very long running works making 3501 * blocked draining impractical. 3502 * 3503 * This is solved by allowing a gcwq to be disassociated from the CPU 3504 * running as an unbound one and allowing it to be reattached later if the 3505 * cpu comes back online. 3506 */ 3507 3508 /* claim manager positions of all pools */ 3509 static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq) 3510 { 3511 struct worker_pool *pool; 3512 3513 for_each_worker_pool(pool, gcwq) 3514 mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools); 3515 spin_lock_irq(&gcwq->lock); 3516 } 3517 3518 /* release manager positions */ 3519 static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq) 3520 { 3521 struct worker_pool *pool; 3522 3523 spin_unlock_irq(&gcwq->lock); 3524 for_each_worker_pool(pool, gcwq) 3525 mutex_unlock(&pool->assoc_mutex); 3526 } 3527 3528 static void gcwq_unbind_fn(struct work_struct *work) 3529 { 3530 struct global_cwq *gcwq = get_gcwq(smp_processor_id()); 3531 struct worker_pool *pool; 3532 struct worker *worker; 3533 struct hlist_node *pos; 3534 int i; 3535 3536 BUG_ON(gcwq->cpu != smp_processor_id()); 3537 3538 gcwq_claim_assoc_and_lock(gcwq); 3539 3540 /* 3541 * We've claimed all manager positions. Make all workers unbound 3542 * and set DISASSOCIATED. Before this, all workers except for the 3543 * ones which are still executing works from before the last CPU 3544 * down must be on the cpu. After this, they may become diasporas. 3545 */ 3546 for_each_worker_pool(pool, gcwq) 3547 list_for_each_entry(worker, &pool->idle_list, entry) 3548 worker->flags |= WORKER_UNBOUND; 3549 3550 for_each_busy_worker(worker, i, pos, gcwq) 3551 worker->flags |= WORKER_UNBOUND; 3552 3553 gcwq->flags |= GCWQ_DISASSOCIATED; 3554 3555 gcwq_release_assoc_and_unlock(gcwq); 3556 3557 /* 3558 * Call schedule() so that we cross rq->lock and thus can guarantee 3559 * sched callbacks see the %WORKER_UNBOUND flag. This is necessary 3560 * as scheduler callbacks may be invoked from other cpus. 3561 */ 3562 schedule(); 3563 3564 /* 3565 * Sched callbacks are disabled now. Zap nr_running. 
After this, 3566 * nr_running stays zero and need_more_worker() and keep_working() 3567 * are always true as long as the worklist is not empty. @gcwq now 3568 * behaves as unbound (in terms of concurrency management) gcwq 3569 * which is served by workers tied to the CPU. 3570 * 3571 * On return from this function, the current worker would trigger 3572 * unbound chain execution of pending work items if other workers 3573 * didn't already. 3574 */ 3575 for_each_worker_pool(pool, gcwq) 3576 atomic_set(get_pool_nr_running(pool), 0); 3577 } 3578 3579 /* 3580 * Workqueues should be brought up before normal priority CPU notifiers. 3581 * This will be registered high priority CPU notifier. 3582 */ 3583 static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, 3584 unsigned long action, 3585 void *hcpu) 3586 { 3587 unsigned int cpu = (unsigned long)hcpu; 3588 struct global_cwq *gcwq = get_gcwq(cpu); 3589 struct worker_pool *pool; 3590 3591 switch (action & ~CPU_TASKS_FROZEN) { 3592 case CPU_UP_PREPARE: 3593 for_each_worker_pool(pool, gcwq) { 3594 struct worker *worker; 3595 3596 if (pool->nr_workers) 3597 continue; 3598 3599 worker = create_worker(pool); 3600 if (!worker) 3601 return NOTIFY_BAD; 3602 3603 spin_lock_irq(&gcwq->lock); 3604 start_worker(worker); 3605 spin_unlock_irq(&gcwq->lock); 3606 } 3607 break; 3608 3609 case CPU_DOWN_FAILED: 3610 case CPU_ONLINE: 3611 gcwq_claim_assoc_and_lock(gcwq); 3612 gcwq->flags &= ~GCWQ_DISASSOCIATED; 3613 rebind_workers(gcwq); 3614 gcwq_release_assoc_and_unlock(gcwq); 3615 break; 3616 } 3617 return NOTIFY_OK; 3618 } 3619 3620 /* 3621 * Workqueues should be brought down after normal priority CPU notifiers. 3622 * This will be registered as low priority CPU notifier. 3623 */ 3624 static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, 3625 unsigned long action, 3626 void *hcpu) 3627 { 3628 unsigned int cpu = (unsigned long)hcpu; 3629 struct work_struct unbind_work; 3630 3631 switch (action & ~CPU_TASKS_FROZEN) { 3632 case CPU_DOWN_PREPARE: 3633 /* unbinding should happen on the local CPU */ 3634 INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn); 3635 queue_work_on(cpu, system_highpri_wq, &unbind_work); 3636 flush_work(&unbind_work); 3637 break; 3638 } 3639 return NOTIFY_OK; 3640 } 3641 3642 #ifdef CONFIG_SMP 3643 3644 struct work_for_cpu { 3645 struct work_struct work; 3646 long (*fn)(void *); 3647 void *arg; 3648 long ret; 3649 }; 3650 3651 static void work_for_cpu_fn(struct work_struct *work) 3652 { 3653 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 3654 3655 wfc->ret = wfc->fn(wfc->arg); 3656 } 3657 3658 /** 3659 * work_on_cpu - run a function in user context on a particular cpu 3660 * @cpu: the cpu to run on 3661 * @fn: the function to run 3662 * @arg: the function arg 3663 * 3664 * This will return the value @fn returns. 3665 * It is up to the caller to ensure that the cpu doesn't go offline. 3666 * The caller must not hold any locks which would prevent @fn from completing. 3667 */ 3668 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 3669 { 3670 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 3671 3672 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 3673 schedule_work_on(cpu, &wfc.work); 3674 flush_work(&wfc.work); 3675 return wfc.ret; 3676 } 3677 EXPORT_SYMBOL_GPL(work_on_cpu); 3678 #endif /* CONFIG_SMP */ 3679 3680 #ifdef CONFIG_FREEZER 3681 3682 /** 3683 * freeze_workqueues_begin - begin freezing workqueues 3684 * 3685 * Start freezing workqueues. 
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues. After this function returns, all freezable
 * workqueues will queue new works to their delayed_works list instead of
 * gcwq->worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (cwq && wq->flags & WQ_FREEZABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}

/**
 * freeze_workqueues_busy - are freezable workqueues still busy?
 *
 * Check whether freezing is complete. This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezable workqueues are still busy. %false if freezing
 * is complete.
 */
bool freeze_workqueues_busy(void)
{
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_gcwq_cpu(cpu) {
		struct workqueue_struct *wq;
		/*
		 * nr_active is monotonically decreasing. It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}
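/*
 * Illustrative sketch (not part of the upstream file, compiled out via
 * "#if 0"): the begin -> poll-busy -> thaw sequence these functions are
 * designed for, roughly what the PM freezer drives during suspend.
 * example_freeze_workqueues(), the retry count and the timeout handling
 * are hypothetical simplifications.
 */
#if 0
static int example_freeze_workqueues(void)
{
	int tries = 20;

	freeze_workqueues_begin();

	/* wait for already-running freezable work items to drain */
	while (freeze_workqueues_busy()) {
		if (!--tries) {
			/* give up and restore normal queueing */
			thaw_workqueues();
			return -EBUSY;
		}
		schedule_timeout_uninterruptible(HZ / 10);
	}

	/* frozen; the caller thaws later via thaw_workqueues() */
	return 0;
}
#endif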
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues. Normal queueing is restored and all collected
 * frozen works are transferred to their respective gcwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq_set_max_active(cwq, wq->saved_max_active);
		}

		for_each_worker_pool(pool, gcwq)
			wake_up_worker(pool);

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */

static int __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	/* make sure we have enough bits for OFFQ CPU number */
	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
		     WORK_CPU_LAST);

	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);

	/* initialize gcwqs */
	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;

		spin_lock_init(&gcwq->lock);
		gcwq->cpu = cpu;
		gcwq->flags |= GCWQ_DISASSOCIATED;

		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		for_each_worker_pool(pool, gcwq) {
			pool->gcwq = gcwq;
			INIT_LIST_HEAD(&pool->worklist);
			INIT_LIST_HEAD(&pool->idle_list);

			init_timer_deferrable(&pool->idle_timer);
			pool->idle_timer.function = idle_worker_timeout;
			pool->idle_timer.data = (unsigned long)pool;

			setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
				    (unsigned long)pool);

			mutex_init(&pool->assoc_mutex);
			ida_init(&pool->worker_ida);
		}
	}

	/* create the initial worker */
	for_each_online_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;

		if (cpu != WORK_CPU_UNBOUND)
			gcwq->flags &= ~GCWQ_DISASSOCIATED;

		for_each_worker_pool(pool, gcwq) {
			struct worker *worker;

			worker = create_worker(pool);
			BUG_ON(!worker);
			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			spin_unlock_irq(&gcwq->lock);
		}
	}

	system_wq = alloc_workqueue("events", 0, 0);
	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	system_freezable_wq = alloc_workqueue("events_freezable",
					      WQ_FREEZABLE, 0);
	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
	       !system_unbound_wq || !system_freezable_wq);
	return 0;
}
early_initcall(init_workqueues);
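/*
 * Illustrative sketch (not part of the upstream file, compiled out via
 * "#if 0"): queueing work on the system workqueues created by
 * init_workqueues() above. example_work_fn(), example_work and
 * example_dwork are hypothetical.
 */
#if 0
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_work_fn);
static DECLARE_DELAYED_WORK(example_dwork, example_work_fn);

static void example_queue_work(void)
{
	/* system_wq is per-cpu; this queues on the local CPU's pool */
	schedule_work(&example_work);

	/* system_unbound_wq may run the item on any CPU, ~1s from now */
	queue_delayed_work(system_unbound_wq, &example_dwork, HZ);
}
#endif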