// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <linux/mmu_context.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)
#define WORKER_INIT_LIMIT	3

enum {
	IO_WORKER_F_UP		= 0,	/* up and active */
	IO_WORKER_F_RUNNING	= 1,	/* account as running */
	IO_WORKER_F_FREE	= 2,	/* worker on free list */
	IO_WORKER_F_BOUND	= 3,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};

/*
 * One for each thread in a wq pool
 */
struct io_worker {
	refcount_t ref;
	int create_index;
	unsigned long flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wq *wq;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int init_retries;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wq_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wq_acct acct[IO_WQ_ACCT_NR];

	/* lock protects access to elements below */
	raw_spinlock_t lock;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, int index);
static void io_wq_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
	return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
						  struct io_wq_work *work)
{
	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wq, test_bit(IO_WORKER_F_BOUND, &worker->flags));
}

static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}

bool io_wq_worker_stopped(void)
{
	struct io_worker *worker = current->worker_private;

	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
		return true;

	return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
}

static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&wq->lock);
	acct->nr_workers--;
	raw_spin_unlock(&wq->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}

static void io_worker_exit(struct io_worker *worker)
{
	struct io_wq *wq = worker->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wq->lock);
	if (test_bit(IO_WORKER_F_FREE, &worker->flags))
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wq->lock);
	io_wq_dec_running(worker);
	/*
	 * this worker is a goner, clear ->worker_private to avoid any
	 * inc/dec running calls that could happen as part of exit from
	 * touching 'worker'.
	 */
	current->worker_private = NULL;

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wq);
	do_exit(0);
}

static inline bool __io_acct_run_queue(struct io_wq_acct *acct)
{
	return !test_bit(IO_ACCT_STALLED_BIT, &acct->flags) &&
		!wq_list_empty(&acct->work_list);
}

/*
 * If there's work to do, returns true with acct->lock acquired. If not,
 * returns false with no lock held.
 */
static inline bool io_acct_run_queue(struct io_wq_acct *acct)
	__acquires(&acct->lock)
{
	raw_spin_lock(&acct->lock);
	if (__io_acct_run_queue(acct))
		return true;

	raw_spin_unlock(&acct->lock);
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wq_activate_free_worker(struct io_wq *wq,
					struct io_wq_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wq_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		/*
		 * If the worker is already running, it's either already
		 * starting work or finishing work. In either case, if it does
		 * go to sleep, we'll kick off a new task for this work anyway.
		 */
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wq->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wq->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wq->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	return create_io_worker(wq, acct->index);
}

static void io_wq_inc_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;

	struct io_wq_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wq = worker->wq;
	acct = &wq->acct[worker->create_index];
	raw_spin_lock(&wq->lock);

	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wq->lock);
	if (do_create) {
		create_io_worker(wq, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wq_acct *acct,
				   task_work_func_t func)
{
	struct io_wq *wq = worker->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}

static void io_wq_dec_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	if (!test_bit(IO_WORKER_F_UP, &worker->flags))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	raw_spin_unlock(&acct->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
{
	if (test_bit(IO_WORKER_F_FREE, &worker->flags)) {
		clear_bit(IO_WORKER_F_FREE, &worker->flags);
		raw_spin_lock(&wq->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wq->lock);
	}
}

/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
	__must_hold(wq->lock)
{
	if (!test_bit(IO_WORKER_F_FREE, &worker->flags)) {
		set_bit(IO_WORKER_F_FREE, &worker->flags);
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
{
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wq->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wq->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wq->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}

static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wq *wq = worker->wq;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wq->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wq->hash->map)) {
			wq->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wq, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wq->hash->wait))
				wake_up(&wq->hash->wait);
		}
	}

	return NULL;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_run_task_work();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}

/*
 * Called with acct->lock held, drops it before returning
 */
static void io_worker_handle_work(struct io_wq_acct *acct,
				  struct io_worker *worker)
	__releases(&acct->lock)
{
	struct io_wq *wq = worker->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(acct, worker);
		if (work) {
			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		}

		raw_spin_unlock(&acct->lock);

		if (!work)
			break;

		__io_worker_busy(wq, worker);

		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wq_enqueue(wq, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);

		if (!__io_acct_run_queue(acct))
			break;
		raw_spin_lock(&acct->lock);
	} while (1);
}

static int io_wq_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool exit_mask = false, last_timeout = false;
	char buf[TASK_COMM_LEN];

	set_mask_bits(&worker->flags, 0,
		      BIT(IO_WORKER_F_UP) | BIT(IO_WORKER_F_RUNNING));

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

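		/*
		 * Mark ourselves interruptible before looking for work, so a
		 * concurrent io_wq_enqueue() either sees the queued work in
		 * the check below, or its wake_up_process() puts us back to
		 * TASK_RUNNING and the idle sleep further down returns early.
		 */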
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * If we have work to do, io_acct_run_queue() returns with
		 * the acct->lock held. If not, it will drop it.
		 */
		while (io_acct_run_queue(acct))
			io_worker_handle_work(acct, worker);

		raw_spin_lock(&wq->lock);
		/*
		 * Last sleep timed out. Exit if we're not the last worker,
		 * or if someone modified our affinity.
		 */
		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
			acct->nr_workers--;
			raw_spin_unlock(&wq->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wq, worker);
		raw_spin_unlock(&wq->lock);
		if (io_run_task_work())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		if (!ret) {
			last_timeout = true;
			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
							wq->cpu_mask);
		}
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) && io_acct_run_queue(acct))
		io_worker_handle_work(acct, worker);

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!test_bit(IO_WORKER_F_UP, &worker->flags))
		return;
	if (test_bit(IO_WORKER_F_RUNNING, &worker->flags))
		return;
	set_bit(IO_WORKER_F_RUNNING, &worker->flags);
	io_wq_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!test_bit(IO_WORKER_F_UP, &worker->flags))
		return;
	if (!test_bit(IO_WORKER_F_RUNNING, &worker->flags))
		return;

	clear_bit(IO_WORKER_F_RUNNING, &worker->flags);
	io_wq_dec_running(worker);
}

static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wq->cpu_mask);

	raw_spin_lock(&wq->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	list_add_tail_rcu(&worker->all_list, &wq->all_list);
	set_bit(IO_WORKER_F_FREE, &worker->flags);
	raw_spin_unlock(&wq->lock);
	wake_up_new_task(tsk);
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(struct io_worker *worker, long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;
	if (worker->init_retries++ >= WORKER_INIT_LIMIT)
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}

static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wq *wq;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wq = worker->wq;
	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
		struct io_wq_acct *acct = io_wq_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wq->lock);
			while (io_acct_cancel_pending_work(wq, acct, &match))
				;
		} else {
			raw_spin_unlock(&wq->lock);
		}
		io_worker_ref_put(wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}

static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}

static bool create_io_worker(struct io_wq *wq, int index)
{
	struct io_wq_acct *acct = &wq->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wq->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wq = wq;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		set_bit(IO_WORKER_F_BOUND, &worker->flags);

	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
	} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wq *wq,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
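	/*
	 * TIF_NOTIFY_SIGNAL makes signal_pending() return true for the
	 * worker, so it breaks out of interruptible waits and its work loop
	 * checks; the wakeup below runs it if it is currently asleep.
	 */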
	wake_up_process(worker->task);
	return false;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wq->hash_tail[hash];
	wq->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned long work_flags = work->flags;
	struct io_cb_cancel_data match = {
		.fn		= io_wq_work_match_item,
		.data		= work,
		.cancel_all	= false,
	};
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wq);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wq_insert_work(wq, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	rcu_read_lock();
	do_create = !io_wq_activate_free_worker(wq, acct);
	rcu_read_unlock();

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wq_create_worker(wq, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wq->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wq->lock);
			return;
		}
		raw_spin_unlock(&wq->lock);

		/* fatal condition, failed to create the first worker */
		io_acct_cancel_pending_work(wq, acct, &match);
	}
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
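/*
 * Illustrative sketch only ('req' and its file are the caller's context, not
 * defined in this file): a submitter that wants buffered writes to the same
 * file serialized would hash the work by the inode before queueing it, e.g.
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 *
 * Items hashing to the same bucket then execute one at a time, in queueing
 * order, while differently hashed work still runs in parallel.
 */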
static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wq_remove_pending(struct io_wq *wq,
					struct io_wq_work *work,
					struct io_wq_work_node *prev)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wq->hash_tail[hash] = prev_work;
		else
			wq->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}

static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wq_remove_pending(wq, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wq);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}

static void io_wq_cancel_pending_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = io_get_acct(wq, i == 0);

		if (io_acct_cancel_pending_work(wq, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}

static void io_wq_cancel_running_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the wq->lock, to ensure that
	 * we'll find a work item regardless of state.
	 */
	io_wq_cancel_pending_work(wq, &match);
	if (match.nr_pending && !match.cancel_all)
		return IO_WQ_CANCEL_OK;

	raw_spin_lock(&wq->lock);
	io_wq_cancel_running_work(wq, &match);
	raw_spin_unlock(&wq->lock);
	if (match.nr_running && !match.cancel_all)
		return IO_WQ_CANCEL_RUNNING;

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			   int sync, void *key)
{
	struct io_wq *wq = container_of(wait, struct io_wq, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wq_activate_free_worker(wq, acct);
	}
	rcu_read_unlock();
	return 1;
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;

	if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
		goto err;
	cpuset_cpus_allowed(data->task, wq->cpu_mask);
	wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
	wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
				task_rlimit(current, RLIMIT_NPROC);
	INIT_LIST_HEAD(&wq->wait.entry);
	wq->wait.func = io_wq_hash_wake;
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		acct->index = i;
		atomic_set(&acct->nr_running, 0);
		INIT_WQ_LIST(&acct->work_list);
		raw_spin_lock_init(&acct->lock);
	}

	raw_spin_lock_init(&wq->lock);
	INIT_HLIST_NULLS_HEAD(&wq->free_list, 0);
	INIT_LIST_HEAD(&wq->all_list);

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err;

	return wq;
err:
	io_wq_put_hash(data->hash);
	free_cpumask_var(wq->cpu_mask);
	kfree(wq);
	return ERR_PTR(ret);
}

static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}

static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
		/*
		 * Only the worker continuation helper has worker allocated and
		 * hence needs freeing.
		 */
		if (cb->func == create_worker_cont)
			kfree(worker);
	}
}

static void io_wq_exit_workers(struct io_wq *wq)
{
	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	spin_lock_irq(&wq->hash->wait.lock);
	list_del_init(&wq->wait.entry);
	spin_unlock_irq(&wq->hash->wait.lock);

	put_task_struct(wq->task);
	wq->task = NULL;
}

static void io_wq_destroy(struct io_wq *wq)
{
	struct io_cb_cancel_data match = {
		.fn		= io_wq_work_match_all,
		.cancel_all	= true,
	};

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	io_wq_cancel_pending_work(wq, &match);
	free_cpumask_var(wq->cpu_mask);
	io_wq_put_hash(wq->hash);
	kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}

struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
	return false;
}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}

int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
{
	cpumask_var_t allowed_mask;
	int ret = 0;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask);
	if (mask) {
		if (cpumask_subset(mask, allowed_mask))
			cpumask_copy(tctx->io_wq->cpu_mask, mask);
		else
			ret = -EINVAL;
	} else {
		cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask);
	}
	rcu_read_unlock();

	free_cpumask_var(allowed_mask);
	return ret;
}

/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	struct io_wq_acct *acct;
	int prev[IO_WQ_ACCT_NR];
	int i;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();

	raw_spin_lock(&wq->lock);
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		acct = &wq->acct[i];
		prev[i] = max_t(int, acct->max_workers, prev[i]);
		if (new_count[i])
			acct->max_workers = new_count[i];
	}
	raw_spin_unlock(&wq->lock);
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);