xref: /openbmc/linux/io_uring/io-wq.c (revision 36db6e8484ed455bbb320d89a119378897ae991c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Basic worker thread pool for io_uring
4  *
5  * Copyright (C) 2019 Jens Axboe
6  *
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/errno.h>
11 #include <linux/sched/signal.h>
12 #include <linux/percpu.h>
13 #include <linux/slab.h>
14 #include <linux/rculist_nulls.h>
15 #include <linux/cpu.h>
16 #include <linux/cpuset.h>
17 #include <linux/task_work.h>
18 #include <linux/audit.h>
19 #include <linux/mmu_context.h>
20 #include <uapi/linux/io_uring.h>
21 
22 #include "io-wq.h"
23 #include "slist.h"
24 #include "io_uring.h"
25 
26 #define WORKER_IDLE_TIMEOUT	(5 * HZ)
27 #define WORKER_INIT_LIMIT	3
28 
29 enum {
30 	IO_WORKER_F_UP		= 0,	/* up and active */
31 	IO_WORKER_F_RUNNING	= 1,	/* account as running */
32 	IO_WORKER_F_FREE	= 2,	/* worker on free list */
33 	IO_WORKER_F_BOUND	= 3,	/* is doing bounded work */
34 };
35 
36 enum {
37 	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
38 };
39 
40 enum {
41 	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
42 };
43 
44 /*
45  * One for each thread in a wq pool
46  */
47 struct io_worker {
48 	refcount_t ref;
49 	int create_index;
50 	unsigned long flags;
51 	struct hlist_nulls_node nulls_node;
52 	struct list_head all_list;
53 	struct task_struct *task;
54 	struct io_wq *wq;
55 
56 	struct io_wq_work *cur_work;
57 	struct io_wq_work *next_work;
58 	raw_spinlock_t lock;
59 
60 	struct completion ref_done;
61 
62 	unsigned long create_state;
63 	struct callback_head create_work;
64 	int init_retries;
65 
66 	union {
67 		struct rcu_head rcu;
68 		struct delayed_work work;
69 	};
70 };
71 
72 #if BITS_PER_LONG == 64
73 #define IO_WQ_HASH_ORDER	6
74 #else
75 #define IO_WQ_HASH_ORDER	5
76 #endif
77 
78 #define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
79 
80 struct io_wq_acct {
81 	unsigned nr_workers;
82 	unsigned max_workers;
83 	int index;
84 	atomic_t nr_running;
85 	raw_spinlock_t lock;
86 	struct io_wq_work_list work_list;
87 	unsigned long flags;
88 };
89 
90 enum {
91 	IO_WQ_ACCT_BOUND,
92 	IO_WQ_ACCT_UNBOUND,
93 	IO_WQ_ACCT_NR,
94 };
95 
96 /*
97  * Per io_wq state
98  */
99 struct io_wq {
100 	unsigned long state;
101 
102 	free_work_fn *free_work;
103 	io_wq_work_fn *do_work;
104 
105 	struct io_wq_hash *hash;
106 
107 	atomic_t worker_refs;
108 	struct completion worker_done;
109 
110 	struct hlist_node cpuhp_node;
111 
112 	struct task_struct *task;
113 
114 	struct io_wq_acct acct[IO_WQ_ACCT_NR];
115 
116 	/* lock protects access to elements below */
117 	raw_spinlock_t lock;
118 
119 	struct hlist_nulls_head free_list;
120 	struct list_head all_list;
121 
122 	struct wait_queue_entry wait;
123 
124 	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
125 
126 	cpumask_var_t cpu_mask;
127 };
128 
129 static enum cpuhp_state io_wq_online;
130 
131 struct io_cb_cancel_data {
132 	work_cancel_fn *fn;
133 	void *data;
134 	int nr_running;
135 	int nr_pending;
136 	bool cancel_all;
137 };
138 
139 static bool create_io_worker(struct io_wq *wq, int index);
140 static void io_wq_dec_running(struct io_worker *worker);
141 static bool io_acct_cancel_pending_work(struct io_wq *wq,
142 					struct io_wq_acct *acct,
143 					struct io_cb_cancel_data *match);
144 static void create_worker_cb(struct callback_head *cb);
145 static void io_wq_cancel_tw_create(struct io_wq *wq);
146 
147 static bool io_worker_get(struct io_worker *worker)
148 {
149 	return refcount_inc_not_zero(&worker->ref);
150 }
151 
152 static void io_worker_release(struct io_worker *worker)
153 {
154 	if (refcount_dec_and_test(&worker->ref))
155 		complete(&worker->ref_done);
156 }
157 
158 static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
159 {
160 	return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
161 }
162 
163 static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
164 						  struct io_wq_work *work)
165 {
166 	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
167 }
168 
169 static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
170 {
171 	return io_get_acct(worker->wq, test_bit(IO_WORKER_F_BOUND, &worker->flags));
172 }
173 
174 static void io_worker_ref_put(struct io_wq *wq)
175 {
176 	if (atomic_dec_and_test(&wq->worker_refs))
177 		complete(&wq->worker_done);
178 }
179 
180 bool io_wq_worker_stopped(void)
181 {
182 	struct io_worker *worker = current->worker_private;
183 
184 	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
185 		return true;
186 
187 	return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
188 }
189 
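/*
 * Undo the accounting and references taken for a queued worker creation
 * request that is being torn down before it had a chance to run.
 */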
190 static void io_worker_cancel_cb(struct io_worker *worker)
191 {
192 	struct io_wq_acct *acct = io_wq_get_acct(worker);
193 	struct io_wq *wq = worker->wq;
194 
195 	atomic_dec(&acct->nr_running);
196 	raw_spin_lock(&wq->lock);
197 	acct->nr_workers--;
198 	raw_spin_unlock(&wq->lock);
199 	io_worker_ref_put(wq);
200 	clear_bit_unlock(0, &worker->create_state);
201 	io_worker_release(worker);
202 }
203 
204 static bool io_task_worker_match(struct callback_head *cb, void *data)
205 {
206 	struct io_worker *worker;
207 
208 	if (cb->func != create_worker_cb)
209 		return false;
210 	worker = container_of(cb, struct io_worker, create_work);
211 	return worker == data;
212 }
213 
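/*
 * Final teardown of an exiting worker: cancel any pending worker-create
 * task_work for it, drop our reference and wait for remaining ones, unlink
 * the worker from the wq lists and free it.
 */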
214 static void io_worker_exit(struct io_worker *worker)
215 {
216 	struct io_wq *wq = worker->wq;
217 
218 	while (1) {
219 		struct callback_head *cb = task_work_cancel_match(wq->task,
220 						io_task_worker_match, worker);
221 
222 		if (!cb)
223 			break;
224 		io_worker_cancel_cb(worker);
225 	}
226 
227 	io_worker_release(worker);
228 	wait_for_completion(&worker->ref_done);
229 
230 	raw_spin_lock(&wq->lock);
231 	if (test_bit(IO_WORKER_F_FREE, &worker->flags))
232 		hlist_nulls_del_rcu(&worker->nulls_node);
233 	list_del_rcu(&worker->all_list);
234 	raw_spin_unlock(&wq->lock);
235 	io_wq_dec_running(worker);
236 	/*
237 	 * this worker is a goner, clear ->worker_private to avoid any
238 	 * inc/dec running calls that could happen as part of exit from
239 	 * touching 'worker'.
240 	 */
241 	current->worker_private = NULL;
242 
243 	kfree_rcu(worker, rcu);
244 	io_worker_ref_put(wq);
245 	do_exit(0);
246 }
247 
248 static inline bool __io_acct_run_queue(struct io_wq_acct *acct)
249 {
250 	return !test_bit(IO_ACCT_STALLED_BIT, &acct->flags) &&
251 		!wq_list_empty(&acct->work_list);
252 }
253 
254 /*
255  * If there's work to do, returns true with acct->lock acquired. If not,
256  * returns false with no lock held.
257  */
258 static inline bool io_acct_run_queue(struct io_wq_acct *acct)
259 	__acquires(&acct->lock)
260 {
261 	raw_spin_lock(&acct->lock);
262 	if (__io_acct_run_queue(acct))
263 		return true;
264 
265 	raw_spin_unlock(&acct->lock);
266 	return false;
267 }
268 
269 /*
270  * Check head of free list for an available worker. If one isn't available,
271  * caller must create one.
272  */
273 static bool io_wq_activate_free_worker(struct io_wq *wq,
274 					struct io_wq_acct *acct)
275 	__must_hold(RCU)
276 {
277 	struct hlist_nulls_node *n;
278 	struct io_worker *worker;
279 
280 	/*
281 	 * Iterate free_list and see if we can find an idle worker to
282 	 * activate. If a given worker is on the free_list but in the process
283 	 * of exiting, keep trying.
284 	 */
285 	hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
286 		if (!io_worker_get(worker))
287 			continue;
288 		if (io_wq_get_acct(worker) != acct) {
289 			io_worker_release(worker);
290 			continue;
291 		}
292 		/*
293 		 * If the worker is already running, it's either already
294 		 * starting work or finishing work. In either case, if it does
295 		 * go to sleep, we'll kick off a new task for this work anyway.
296 		 */
297 		wake_up_process(worker->task);
298 		io_worker_release(worker);
299 		return true;
300 	}
301 
302 	return false;
303 }
304 
305 /*
306  * We need a worker. If we find a free one, we're good. If not, and we're
307  * below the max number of workers, create one.
308  */
309 static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
310 {
311 	/*
312 	 * Most likely an attempt to queue unbounded work on an io_wq that
313 	 * wasn't setup with any unbounded workers.
314 	 */
315 	if (unlikely(!acct->max_workers))
316 		pr_warn_once("io-wq is not configured for unbound workers");
317 
318 	raw_spin_lock(&wq->lock);
319 	if (acct->nr_workers >= acct->max_workers) {
320 		raw_spin_unlock(&wq->lock);
321 		return true;
322 	}
323 	acct->nr_workers++;
324 	raw_spin_unlock(&wq->lock);
325 	atomic_inc(&acct->nr_running);
326 	atomic_inc(&wq->worker_refs);
327 	return create_io_worker(wq, acct->index);
328 }
329 
330 static void io_wq_inc_running(struct io_worker *worker)
331 {
332 	struct io_wq_acct *acct = io_wq_get_acct(worker);
333 
334 	atomic_inc(&acct->nr_running);
335 }
336 
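/*
 * task_work callback that creates a new worker, provided we're still below
 * the limit for this accounting class. Otherwise the create request is
 * dropped and the running/ref accounting taken when it was queued is undone.
 */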
337 static void create_worker_cb(struct callback_head *cb)
338 {
339 	struct io_worker *worker;
340 	struct io_wq *wq;
341 
342 	struct io_wq_acct *acct;
343 	bool do_create = false;
344 
345 	worker = container_of(cb, struct io_worker, create_work);
346 	wq = worker->wq;
347 	acct = &wq->acct[worker->create_index];
348 	raw_spin_lock(&wq->lock);
349 
350 	if (acct->nr_workers < acct->max_workers) {
351 		acct->nr_workers++;
352 		do_create = true;
353 	}
354 	raw_spin_unlock(&wq->lock);
355 	if (do_create) {
356 		create_io_worker(wq, worker->create_index);
357 	} else {
358 		atomic_dec(&acct->nr_running);
359 		io_worker_ref_put(wq);
360 	}
361 	clear_bit_unlock(0, &worker->create_state);
362 	io_worker_release(worker);
363 }
364 
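/*
 * Queue a task_work item on the wq owner task to create a new worker for
 * @acct. Ownership of create_work/create_index is claimed through the
 * create_state bit. On failure the running count and wq reference held for
 * the new worker are dropped and false is returned.
 */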
365 static bool io_queue_worker_create(struct io_worker *worker,
366 				   struct io_wq_acct *acct,
367 				   task_work_func_t func)
368 {
369 	struct io_wq *wq = worker->wq;
370 
371 	/* raced with exit, just ignore create call */
372 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
373 		goto fail;
374 	if (!io_worker_get(worker))
375 		goto fail;
376 	/*
377 	 * create_state manages ownership of create_work/index. We should
378 	 * only need one entry per worker, as the worker going to sleep
379 	 * will trigger the condition, and waking will clear it once it
380 	 * runs the task_work.
381 	 */
382 	if (test_bit(0, &worker->create_state) ||
383 	    test_and_set_bit_lock(0, &worker->create_state))
384 		goto fail_release;
385 
386 	atomic_inc(&wq->worker_refs);
387 	init_task_work(&worker->create_work, func);
388 	worker->create_index = acct->index;
389 	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
390 		/*
391 		 * EXIT may have been set after checking it above, check after
392 		 * adding the task_work and remove any creation item if it is
393 		 * now set. wq exit does that too, but we can have added this
394 		 * work item after we canceled in io_wq_exit_workers().
395 		 */
396 		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
397 			io_wq_cancel_tw_create(wq);
398 		io_worker_ref_put(wq);
399 		return true;
400 	}
401 	io_worker_ref_put(wq);
402 	clear_bit_unlock(0, &worker->create_state);
403 fail_release:
404 	io_worker_release(worker);
405 fail:
406 	atomic_dec(&acct->nr_running);
407 	io_worker_ref_put(wq);
408 	return false;
409 }
410 
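/*
 * A worker stops running. If it was the last running worker for this
 * accounting class and work is still queued, arrange for a new worker to
 * be created so the pending work doesn't stall.
 */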
411 static void io_wq_dec_running(struct io_worker *worker)
412 {
413 	struct io_wq_acct *acct = io_wq_get_acct(worker);
414 	struct io_wq *wq = worker->wq;
415 
416 	if (!test_bit(IO_WORKER_F_UP, &worker->flags))
417 		return;
418 
419 	if (!atomic_dec_and_test(&acct->nr_running))
420 		return;
421 	if (!io_acct_run_queue(acct))
422 		return;
423 
424 	raw_spin_unlock(&acct->lock);
425 	atomic_inc(&acct->nr_running);
426 	atomic_inc(&wq->worker_refs);
427 	io_queue_worker_create(worker, acct, create_worker_cb);
428 }
429 
430 /*
431  * Worker will start processing some work. Move it to the busy list, if
432  * it's currently on the freelist
433  */
434 static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
435 {
436 	if (test_bit(IO_WORKER_F_FREE, &worker->flags)) {
437 		clear_bit(IO_WORKER_F_FREE, &worker->flags);
438 		raw_spin_lock(&wq->lock);
439 		hlist_nulls_del_init_rcu(&worker->nulls_node);
440 		raw_spin_unlock(&wq->lock);
441 	}
442 }
443 
444 /*
445  * No work, worker going to sleep. Move to freelist.
446  */
447 static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
448 	__must_hold(wq->lock)
449 {
450 	if (!test_bit(IO_WORKER_F_FREE, &worker->flags)) {
451 		set_bit(IO_WORKER_F_FREE, &worker->flags);
452 		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
453 	}
454 }
455 
456 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
457 {
458 	return work->flags >> IO_WQ_HASH_SHIFT;
459 }
460 
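/*
 * Register the wq on the hash waitqueue so it's woken when @hash clears.
 * Returns true if the hash bit was already clear, in which case the wait
 * entry is removed again and the caller may proceed immediately.
 */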
461 static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
462 {
463 	bool ret = false;
464 
465 	spin_lock_irq(&wq->hash->wait.lock);
466 	if (list_empty(&wq->wait.entry)) {
467 		__add_wait_queue(&wq->hash->wait, &wq->wait);
468 		if (!test_bit(hash, &wq->hash->map)) {
469 			__set_current_state(TASK_RUNNING);
470 			list_del_init(&wq->wait.entry);
471 			ret = true;
472 		}
473 	}
474 	spin_unlock_irq(&wq->hash->wait.lock);
475 	return ret;
476 }
477 
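/*
 * Find the next runnable work item for this accounting class. Unhashed work
 * can run right away; hashed work only runs if no other worker currently
 * owns that hash. If only in-flight hashed work remains, mark the acct as
 * stalled and register on the hash waitqueue before returning NULL.
 */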
478 static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
479 					   struct io_worker *worker)
480 	__must_hold(acct->lock)
481 {
482 	struct io_wq_work_node *node, *prev;
483 	struct io_wq_work *work, *tail;
484 	unsigned int stall_hash = -1U;
485 	struct io_wq *wq = worker->wq;
486 
487 	wq_list_for_each(node, prev, &acct->work_list) {
488 		unsigned int hash;
489 
490 		work = container_of(node, struct io_wq_work, list);
491 
492 		/* not hashed, can run anytime */
493 		if (!io_wq_is_hashed(work)) {
494 			wq_list_del(&acct->work_list, node, prev);
495 			return work;
496 		}
497 
498 		hash = io_get_work_hash(work);
499 		/* all items with this hash lie in [work, tail] */
500 		tail = wq->hash_tail[hash];
501 
502 		/* hashed, can run if not already running */
503 		if (!test_and_set_bit(hash, &wq->hash->map)) {
504 			wq->hash_tail[hash] = NULL;
505 			wq_list_cut(&acct->work_list, &tail->list, prev);
506 			return work;
507 		}
508 		if (stall_hash == -1U)
509 			stall_hash = hash;
510 		/* fast forward to the next hash, for-each will fix up @prev */
511 		node = &tail->list;
512 	}
513 
514 	if (stall_hash != -1U) {
515 		bool unstalled;
516 
517 		/*
518 		 * Set this before dropping the lock to avoid racing with new
519 		 * work being added and clearing the stalled bit.
520 		 */
521 		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
522 		raw_spin_unlock(&acct->lock);
523 		unstalled = io_wait_on_hash(wq, stall_hash);
524 		raw_spin_lock(&acct->lock);
525 		if (unstalled) {
526 			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
527 			if (wq_has_sleeper(&wq->hash->wait))
528 				wake_up(&wq->hash->wait);
529 		}
530 	}
531 
532 	return NULL;
533 }
534 
535 static void io_assign_current_work(struct io_worker *worker,
536 				   struct io_wq_work *work)
537 {
538 	if (work) {
539 		io_run_task_work();
540 		cond_resched();
541 	}
542 
543 	raw_spin_lock(&worker->lock);
544 	worker->cur_work = work;
545 	worker->next_work = NULL;
546 	raw_spin_unlock(&worker->lock);
547 }
548 
549 /*
550  * Called with acct->lock held, drops it before returning
551  */
552 static void io_worker_handle_work(struct io_wq_acct *acct,
553 				  struct io_worker *worker)
554 	__releases(&acct->lock)
555 {
556 	struct io_wq *wq = worker->wq;
557 	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
558 
559 	do {
560 		struct io_wq_work *work;
561 
562 		/*
563 		 * If we got some work, mark us as busy. If we didn't, but
564 		 * the list isn't empty, it means we stalled on hashed work.
565 		 * Mark us stalled so we don't keep looking for work when we
566 		 * can't make progress, any work completion or insertion will
567 		 * clear the stalled flag.
568 		 */
569 		work = io_get_next_work(acct, worker);
570 		if (work) {
571 			/*
572 			 * Make sure cancelation can find this, even before
573 			 * it becomes the active work. That avoids a window
574 			 * where the work has been removed from our general
575 			 * work list, but isn't yet discoverable as the
576 			 * current work item for this worker.
577 			 */
578 			raw_spin_lock(&worker->lock);
579 			worker->next_work = work;
580 			raw_spin_unlock(&worker->lock);
581 		}
582 
583 		raw_spin_unlock(&acct->lock);
584 
585 		if (!work)
586 			break;
587 
588 		__io_worker_busy(wq, worker);
589 
590 		io_assign_current_work(worker, work);
591 		__set_current_state(TASK_RUNNING);
592 
593 		/* handle a whole dependent link */
594 		do {
595 			struct io_wq_work *next_hashed, *linked;
596 			unsigned int hash = io_get_work_hash(work);
597 
598 			next_hashed = wq_next_work(work);
599 
600 			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
601 				work->flags |= IO_WQ_WORK_CANCEL;
602 			wq->do_work(work);
603 			io_assign_current_work(worker, NULL);
604 
605 			linked = wq->free_work(work);
606 			work = next_hashed;
607 			if (!work && linked && !io_wq_is_hashed(linked)) {
608 				work = linked;
609 				linked = NULL;
610 			}
611 			io_assign_current_work(worker, work);
612 			if (linked)
613 				io_wq_enqueue(wq, linked);
614 
615 			if (hash != -1U && !next_hashed) {
616 				/* serialize hash clear with wake_up() */
617 				spin_lock_irq(&wq->hash->wait.lock);
618 				clear_bit(hash, &wq->hash->map);
619 				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
620 				spin_unlock_irq(&wq->hash->wait.lock);
621 				if (wq_has_sleeper(&wq->hash->wait))
622 					wake_up(&wq->hash->wait);
623 			}
624 		} while (work);
625 
626 		if (!__io_acct_run_queue(acct))
627 			break;
628 		raw_spin_lock(&acct->lock);
629 	} while (1);
630 }
631 
632 static int io_wq_worker(void *data)
633 {
634 	struct io_worker *worker = data;
635 	struct io_wq_acct *acct = io_wq_get_acct(worker);
636 	struct io_wq *wq = worker->wq;
637 	bool exit_mask = false, last_timeout = false;
638 	char buf[TASK_COMM_LEN];
639 
640 	set_mask_bits(&worker->flags, 0,
641 		      BIT(IO_WORKER_F_UP) | BIT(IO_WORKER_F_RUNNING));
642 
643 	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
644 	set_task_comm(current, buf);
645 
646 	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
647 		long ret;
648 
649 		set_current_state(TASK_INTERRUPTIBLE);
650 
651 		/*
652 		 * If we have work to do, io_acct_run_queue() returns with
653 		 * the acct->lock held. If not, it will drop it.
654 		 */
655 		while (io_acct_run_queue(acct))
656 			io_worker_handle_work(acct, worker);
657 
658 		raw_spin_lock(&wq->lock);
659 		/*
660 		 * Last sleep timed out. Exit if we're not the last worker,
661 		 * or if someone modified our affinity.
662 		 */
663 		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
664 			acct->nr_workers--;
665 			raw_spin_unlock(&wq->lock);
666 			__set_current_state(TASK_RUNNING);
667 			break;
668 		}
669 		last_timeout = false;
670 		__io_worker_idle(wq, worker);
671 		raw_spin_unlock(&wq->lock);
672 		if (io_run_task_work())
673 			continue;
674 		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
675 		if (signal_pending(current)) {
676 			struct ksignal ksig;
677 
678 			if (!get_signal(&ksig))
679 				continue;
680 			break;
681 		}
682 		if (!ret) {
683 			last_timeout = true;
684 			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
685 							wq->cpu_mask);
686 		}
687 	}
688 
689 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) && io_acct_run_queue(acct))
690 		io_worker_handle_work(acct, worker);
691 
692 	io_worker_exit(worker);
693 	return 0;
694 }
695 
696 /*
697  * Called when a worker is scheduled in. Mark us as currently running.
698  */
699 void io_wq_worker_running(struct task_struct *tsk)
700 {
701 	struct io_worker *worker = tsk->worker_private;
702 
703 	if (!worker)
704 		return;
705 	if (!test_bit(IO_WORKER_F_UP, &worker->flags))
706 		return;
707 	if (test_bit(IO_WORKER_F_RUNNING, &worker->flags))
708 		return;
709 	set_bit(IO_WORKER_F_RUNNING, &worker->flags);
710 	io_wq_inc_running(worker);
711 }
712 
713 /*
714  * Called when worker is going to sleep. If there are no workers currently
715  * running and we have work pending, wake up a free one or create a new one.
716  */
717 void io_wq_worker_sleeping(struct task_struct *tsk)
718 {
719 	struct io_worker *worker = tsk->worker_private;
720 
721 	if (!worker)
722 		return;
723 	if (!test_bit(IO_WORKER_F_UP, &worker->flags))
724 		return;
725 	if (!test_bit(IO_WORKER_F_RUNNING, &worker->flags))
726 		return;
727 
728 	clear_bit(IO_WORKER_F_RUNNING, &worker->flags);
729 	io_wq_dec_running(worker);
730 }
731 
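/*
 * Attach a newly created task to its worker, restrict it to the wq CPU
 * mask, add it to the free and all-worker lists, and wake it up.
 */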
732 static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
733 			       struct task_struct *tsk)
734 {
735 	tsk->worker_private = worker;
736 	worker->task = tsk;
737 	set_cpus_allowed_ptr(tsk, wq->cpu_mask);
738 
739 	raw_spin_lock(&wq->lock);
740 	hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
741 	list_add_tail_rcu(&worker->all_list, &wq->all_list);
742 	set_bit(IO_WORKER_F_FREE, &worker->flags);
743 	raw_spin_unlock(&wq->lock);
744 	wake_up_new_task(tsk);
745 }
746 
747 static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
748 {
749 	return true;
750 }
751 
752 static inline bool io_should_retry_thread(struct io_worker *worker, long err)
753 {
754 	/*
755 	 * Prevent perpetual task_work retry, if the task (or its group) is
756 	 * exiting.
757 	 */
758 	if (fatal_signal_pending(current))
759 		return false;
760 	if (worker->init_retries++ >= WORKER_INIT_LIMIT)
761 		return false;
762 
763 	switch (err) {
764 	case -EAGAIN:
765 	case -ERESTARTSYS:
766 	case -ERESTARTNOINTR:
767 	case -ERESTARTNOHAND:
768 		return true;
769 	default:
770 		return false;
771 	}
772 }
773 
774 static void queue_create_worker_retry(struct io_worker *worker)
775 {
776 	/*
777 	 * We only bother retrying because there's a chance that the
778 	 * failure to create a worker is due to some temporary condition
779 	 * in the forking task (e.g. outstanding signal); give the task
780 	 * some time to clear that condition.
781 	 */
782 	schedule_delayed_work(&worker->work,
783 			      msecs_to_jiffies(worker->init_retries * 5));
784 }
785 
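/*
 * task_work continuation after a failed worker creation: retry creating the
 * io thread. Once retries are exhausted, roll back the accounting and, if
 * no workers are left for this acct, cancel its pending work.
 */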
786 static void create_worker_cont(struct callback_head *cb)
787 {
788 	struct io_worker *worker;
789 	struct task_struct *tsk;
790 	struct io_wq *wq;
791 
792 	worker = container_of(cb, struct io_worker, create_work);
793 	clear_bit_unlock(0, &worker->create_state);
794 	wq = worker->wq;
795 	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
796 	if (!IS_ERR(tsk)) {
797 		io_init_new_worker(wq, worker, tsk);
798 		io_worker_release(worker);
799 		return;
800 	} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
801 		struct io_wq_acct *acct = io_wq_get_acct(worker);
802 
803 		atomic_dec(&acct->nr_running);
804 		raw_spin_lock(&wq->lock);
805 		acct->nr_workers--;
806 		if (!acct->nr_workers) {
807 			struct io_cb_cancel_data match = {
808 				.fn		= io_wq_work_match_all,
809 				.cancel_all	= true,
810 			};
811 
812 			raw_spin_unlock(&wq->lock);
813 			while (io_acct_cancel_pending_work(wq, acct, &match))
814 				;
815 		} else {
816 			raw_spin_unlock(&wq->lock);
817 		}
818 		io_worker_ref_put(wq);
819 		kfree(worker);
820 		return;
821 	}
822 
823 	/* re-create attempts grab a new worker ref, drop the existing one */
824 	io_worker_release(worker);
825 	queue_create_worker_retry(worker);
826 }
827 
828 static void io_workqueue_create(struct work_struct *work)
829 {
830 	struct io_worker *worker = container_of(work, struct io_worker,
831 						work.work);
832 	struct io_wq_acct *acct = io_wq_get_acct(worker);
833 
834 	if (!io_queue_worker_create(worker, acct, create_worker_cont))
835 		kfree(worker);
836 }
837 
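/*
 * Allocate and set up a new io_worker and create its task. A transient
 * create_io_thread() failure schedules a delayed retry; a permanent failure
 * rolls back the accounting taken by the caller.
 */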
838 static bool create_io_worker(struct io_wq *wq, int index)
839 {
840 	struct io_wq_acct *acct = &wq->acct[index];
841 	struct io_worker *worker;
842 	struct task_struct *tsk;
843 
844 	__set_current_state(TASK_RUNNING);
845 
846 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
847 	if (!worker) {
848 fail:
849 		atomic_dec(&acct->nr_running);
850 		raw_spin_lock(&wq->lock);
851 		acct->nr_workers--;
852 		raw_spin_unlock(&wq->lock);
853 		io_worker_ref_put(wq);
854 		return false;
855 	}
856 
857 	refcount_set(&worker->ref, 1);
858 	worker->wq = wq;
859 	raw_spin_lock_init(&worker->lock);
860 	init_completion(&worker->ref_done);
861 
862 	if (index == IO_WQ_ACCT_BOUND)
863 		set_bit(IO_WORKER_F_BOUND, &worker->flags);
864 
865 	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
866 	if (!IS_ERR(tsk)) {
867 		io_init_new_worker(wq, worker, tsk);
868 	} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
869 		kfree(worker);
870 		goto fail;
871 	} else {
872 		INIT_DELAYED_WORK(&worker->work, io_workqueue_create);
873 		queue_create_worker_retry(worker);
874 	}
875 
876 	return true;
877 }
878 
879 /*
880  * Iterate the passed in list and call the specific function for each
881  * worker that isn't exiting
882  */
883 static bool io_wq_for_each_worker(struct io_wq *wq,
884 				  bool (*func)(struct io_worker *, void *),
885 				  void *data)
886 {
887 	struct io_worker *worker;
888 	bool ret = false;
889 
890 	list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
891 		if (io_worker_get(worker)) {
892 			/* no task if node is/was offline */
893 			if (worker->task)
894 				ret = func(worker, data);
895 			io_worker_release(worker);
896 			if (ret)
897 				break;
898 		}
899 	}
900 
901 	return ret;
902 }
903 
904 static bool io_wq_worker_wake(struct io_worker *worker, void *data)
905 {
906 	__set_notify_signal(worker->task);
907 	wake_up_process(worker->task);
908 	return false;
909 }
910 
911 static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
912 {
913 	do {
914 		work->flags |= IO_WQ_WORK_CANCEL;
915 		wq->do_work(work);
916 		work = wq->free_work(work);
917 	} while (work);
918 }
919 
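/*
 * Add work to the acct's pending list. Hashed work is chained after the
 * current tail of its hash bucket so items sharing a hash stay contiguous
 * and execute serially.
 */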
920 static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
921 {
922 	struct io_wq_acct *acct = io_work_get_acct(wq, work);
923 	unsigned int hash;
924 	struct io_wq_work *tail;
925 
926 	if (!io_wq_is_hashed(work)) {
927 append:
928 		wq_list_add_tail(&work->list, &acct->work_list);
929 		return;
930 	}
931 
932 	hash = io_get_work_hash(work);
933 	tail = wq->hash_tail[hash];
934 	wq->hash_tail[hash] = work;
935 	if (!tail)
936 		goto append;
937 
938 	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
939 }
940 
941 static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
942 {
943 	return work == data;
944 }
945 
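/*
 * Enqueue work for execution. A free worker is woken if one exists;
 * otherwise a new worker may be created. If io-wq is exiting or the work is
 * already marked for cancelation, it's canceled inline instead.
 */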
946 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
947 {
948 	struct io_wq_acct *acct = io_work_get_acct(wq, work);
949 	unsigned long work_flags = work->flags;
950 	struct io_cb_cancel_data match = {
951 		.fn		= io_wq_work_match_item,
952 		.data		= work,
953 		.cancel_all	= false,
954 	};
955 	bool do_create;
956 
957 	/*
958 	 * If io-wq is exiting for this task, or if the request has explicitly
959 	 * been marked as one that should not get executed, cancel it here.
960 	 */
961 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
962 	    (work->flags & IO_WQ_WORK_CANCEL)) {
963 		io_run_cancel(work, wq);
964 		return;
965 	}
966 
967 	raw_spin_lock(&acct->lock);
968 	io_wq_insert_work(wq, work);
969 	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
970 	raw_spin_unlock(&acct->lock);
971 
972 	rcu_read_lock();
973 	do_create = !io_wq_activate_free_worker(wq, acct);
974 	rcu_read_unlock();
975 
976 	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
977 	    !atomic_read(&acct->nr_running))) {
978 		bool did_create;
979 
980 		did_create = io_wq_create_worker(wq, acct);
981 		if (likely(did_create))
982 			return;
983 
984 		raw_spin_lock(&wq->lock);
985 		if (acct->nr_workers) {
986 			raw_spin_unlock(&wq->lock);
987 			return;
988 		}
989 		raw_spin_unlock(&wq->lock);
990 
991 		/* fatal condition, failed to create the first worker */
992 		io_acct_cancel_pending_work(wq, acct, &match);
993 	}
994 }
995 
996 /*
997  * Work items that hash to the same value will not be done in parallel.
998  * Used to limit concurrent writes, generally hashed by inode.
999  */
1000 void io_wq_hash_work(struct io_wq_work *work, void *val)
1001 {
1002 	unsigned int bit;
1003 
1004 	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
1005 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
1006 }
1007 
1008 static bool __io_wq_worker_cancel(struct io_worker *worker,
1009 				  struct io_cb_cancel_data *match,
1010 				  struct io_wq_work *work)
1011 {
1012 	if (work && match->fn(work, match->data)) {
1013 		work->flags |= IO_WQ_WORK_CANCEL;
1014 		__set_notify_signal(worker->task);
1015 		return true;
1016 	}
1017 
1018 	return false;
1019 }
1020 
1021 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
1022 {
1023 	struct io_cb_cancel_data *match = data;
1024 
1025 	/*
1026 	 * Hold the lock to avoid ->cur_work going out of scope, caller
1027 	 * may dereference the passed in work.
1028 	 */
1029 	raw_spin_lock(&worker->lock);
1030 	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
1031 	    __io_wq_worker_cancel(worker, match, worker->next_work))
1032 		match->nr_running++;
1033 	raw_spin_unlock(&worker->lock);
1034 
1035 	return match->nr_running && !match->cancel_all;
1036 }
1037 
1038 static inline void io_wq_remove_pending(struct io_wq *wq,
1039 					 struct io_wq_work *work,
1040 					 struct io_wq_work_node *prev)
1041 {
1042 	struct io_wq_acct *acct = io_work_get_acct(wq, work);
1043 	unsigned int hash = io_get_work_hash(work);
1044 	struct io_wq_work *prev_work = NULL;
1045 
1046 	if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
1047 		if (prev)
1048 			prev_work = container_of(prev, struct io_wq_work, list);
1049 		if (prev_work && io_get_work_hash(prev_work) == hash)
1050 			wq->hash_tail[hash] = prev_work;
1051 		else
1052 			wq->hash_tail[hash] = NULL;
1053 	}
1054 	wq_list_del(&acct->work_list, &work->list, prev);
1055 }
1056 
1057 static bool io_acct_cancel_pending_work(struct io_wq *wq,
1058 					struct io_wq_acct *acct,
1059 					struct io_cb_cancel_data *match)
1060 {
1061 	struct io_wq_work_node *node, *prev;
1062 	struct io_wq_work *work;
1063 
1064 	raw_spin_lock(&acct->lock);
1065 	wq_list_for_each(node, prev, &acct->work_list) {
1066 		work = container_of(node, struct io_wq_work, list);
1067 		if (!match->fn(work, match->data))
1068 			continue;
1069 		io_wq_remove_pending(wq, work, prev);
1070 		raw_spin_unlock(&acct->lock);
1071 		io_run_cancel(work, wq);
1072 		match->nr_pending++;
1073 		/* not safe to continue after unlock */
1074 		return true;
1075 	}
1076 	raw_spin_unlock(&acct->lock);
1077 
1078 	return false;
1079 }
1080 
1081 static void io_wq_cancel_pending_work(struct io_wq *wq,
1082 				      struct io_cb_cancel_data *match)
1083 {
1084 	int i;
1085 retry:
1086 	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1087 		struct io_wq_acct *acct = io_get_acct(wq, i == 0);
1088 
1089 		if (io_acct_cancel_pending_work(wq, acct, match)) {
1090 			if (match->cancel_all)
1091 				goto retry;
1092 			break;
1093 		}
1094 	}
1095 }
1096 
1097 static void io_wq_cancel_running_work(struct io_wq *wq,
1098 				       struct io_cb_cancel_data *match)
1099 {
1100 	rcu_read_lock();
1101 	io_wq_for_each_worker(wq, io_wq_worker_cancel, match);
1102 	rcu_read_unlock();
1103 }
1104 
1105 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
1106 				  void *data, bool cancel_all)
1107 {
1108 	struct io_cb_cancel_data match = {
1109 		.fn		= cancel,
1110 		.data		= data,
1111 		.cancel_all	= cancel_all,
1112 	};
1113 
1114 	/*
1115 	 * First check pending list, if we're lucky we can just remove it
1116 	 * from there. CANCEL_OK means that the work is returned as-new,
1117 	 * no completion will be posted for it.
1118 	 *
1119 	 * Then check if a free (going busy) or busy worker has the work
1120 	 * currently running. If we find it there, we'll return CANCEL_RUNNING
1121 	 * as an indication that we attempt to signal cancellation. The
1122 	 * completion will run normally in this case.
1123 	 *
1124 	 * Do both of these while holding the wq->lock, to ensure that
1125 	 * we'll find a work item regardless of state.
1126 	 */
1127 	io_wq_cancel_pending_work(wq, &match);
1128 	if (match.nr_pending && !match.cancel_all)
1129 		return IO_WQ_CANCEL_OK;
1130 
1131 	raw_spin_lock(&wq->lock);
1132 	io_wq_cancel_running_work(wq, &match);
1133 	raw_spin_unlock(&wq->lock);
1134 	if (match.nr_running && !match.cancel_all)
1135 		return IO_WQ_CANCEL_RUNNING;
1136 
1137 	if (match.nr_running)
1138 		return IO_WQ_CANCEL_RUNNING;
1139 	if (match.nr_pending)
1140 		return IO_WQ_CANCEL_OK;
1141 	return IO_WQ_CANCEL_NOTFOUND;
1142 }
1143 
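/*
 * Wake callback for the shared hash waitqueue: a hash bit was cleared, so
 * unstall any accounting class that was waiting on hashed work.
 */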
1144 static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
1145 			    int sync, void *key)
1146 {
1147 	struct io_wq *wq = container_of(wait, struct io_wq, wait);
1148 	int i;
1149 
1150 	list_del_init(&wait->entry);
1151 
1152 	rcu_read_lock();
1153 	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1154 		struct io_wq_acct *acct = &wq->acct[i];
1155 
1156 		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
1157 			io_wq_activate_free_worker(wq, acct);
1158 	}
1159 	rcu_read_unlock();
1160 	return 1;
1161 }
1162 
1163 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
1164 {
1165 	int ret, i;
1166 	struct io_wq *wq;
1167 
1168 	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
1169 		return ERR_PTR(-EINVAL);
1170 	if (WARN_ON_ONCE(!bounded))
1171 		return ERR_PTR(-EINVAL);
1172 
1173 	wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
1174 	if (!wq)
1175 		return ERR_PTR(-ENOMEM);
1176 
1177 	refcount_inc(&data->hash->refs);
1178 	wq->hash = data->hash;
1179 	wq->free_work = data->free_work;
1180 	wq->do_work = data->do_work;
1181 
1182 	ret = -ENOMEM;
1183 
1184 	if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
1185 		goto err;
1186 	cpuset_cpus_allowed(data->task, wq->cpu_mask);
1187 	wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
1188 	wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
1189 				task_rlimit(current, RLIMIT_NPROC);
1190 	INIT_LIST_HEAD(&wq->wait.entry);
1191 	wq->wait.func = io_wq_hash_wake;
1192 	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1193 		struct io_wq_acct *acct = &wq->acct[i];
1194 
1195 		acct->index = i;
1196 		atomic_set(&acct->nr_running, 0);
1197 		INIT_WQ_LIST(&acct->work_list);
1198 		raw_spin_lock_init(&acct->lock);
1199 	}
1200 
1201 	raw_spin_lock_init(&wq->lock);
1202 	INIT_HLIST_NULLS_HEAD(&wq->free_list, 0);
1203 	INIT_LIST_HEAD(&wq->all_list);
1204 
1205 	wq->task = get_task_struct(data->task);
1206 	atomic_set(&wq->worker_refs, 1);
1207 	init_completion(&wq->worker_done);
1208 	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1209 	if (ret)
1210 		goto err;
1211 
1212 	return wq;
1213 err:
1214 	io_wq_put_hash(data->hash);
1215 	free_cpumask_var(wq->cpu_mask);
1216 	kfree(wq);
1217 	return ERR_PTR(ret);
1218 }
1219 
1220 static bool io_task_work_match(struct callback_head *cb, void *data)
1221 {
1222 	struct io_worker *worker;
1223 
1224 	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
1225 		return false;
1226 	worker = container_of(cb, struct io_worker, create_work);
1227 	return worker->wq == data;
1228 }
1229 
1230 void io_wq_exit_start(struct io_wq *wq)
1231 {
1232 	set_bit(IO_WQ_BIT_EXIT, &wq->state);
1233 }
1234 
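/*
 * Cancel any queued task_work items that would create new workers for this
 * wq, undoing their accounting. Workers allocated for the creation
 * continuation are freed here as well.
 */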
1235 static void io_wq_cancel_tw_create(struct io_wq *wq)
1236 {
1237 	struct callback_head *cb;
1238 
1239 	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
1240 		struct io_worker *worker;
1241 
1242 		worker = container_of(cb, struct io_worker, create_work);
1243 		io_worker_cancel_cb(worker);
1244 		/*
1245 		 * Only the worker continuation helper has worker allocated and
1246 		 * hence needs freeing.
1247 		 */
1248 		if (cb->func == create_worker_cont)
1249 			kfree(worker);
1250 	}
1251 }
1252 
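/*
 * Tear down all workers: cancel pending worker creation, wake every worker
 * so it sees IO_WQ_BIT_EXIT, then wait for the last worker reference to be
 * dropped before detaching from the hash waitqueue and the owner task.
 */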
1253 static void io_wq_exit_workers(struct io_wq *wq)
1254 {
1255 	if (!wq->task)
1256 		return;
1257 
1258 	io_wq_cancel_tw_create(wq);
1259 
1260 	rcu_read_lock();
1261 	io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
1262 	rcu_read_unlock();
1263 	io_worker_ref_put(wq);
1264 	wait_for_completion(&wq->worker_done);
1265 
1266 	spin_lock_irq(&wq->hash->wait.lock);
1267 	list_del_init(&wq->wait.entry);
1268 	spin_unlock_irq(&wq->hash->wait.lock);
1269 
1270 	put_task_struct(wq->task);
1271 	wq->task = NULL;
1272 }
1273 
1274 static void io_wq_destroy(struct io_wq *wq)
1275 {
1276 	struct io_cb_cancel_data match = {
1277 		.fn		= io_wq_work_match_all,
1278 		.cancel_all	= true,
1279 	};
1280 
1281 	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1282 	io_wq_cancel_pending_work(wq, &match);
1283 	free_cpumask_var(wq->cpu_mask);
1284 	io_wq_put_hash(wq->hash);
1285 	kfree(wq);
1286 }
1287 
1288 void io_wq_put_and_exit(struct io_wq *wq)
1289 {
1290 	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
1291 
1292 	io_wq_exit_workers(wq);
1293 	io_wq_destroy(wq);
1294 }
1295 
1296 struct online_data {
1297 	unsigned int cpu;
1298 	bool online;
1299 };
1300 
1301 static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
1302 {
1303 	struct online_data *od = data;
1304 
1305 	if (od->online)
1306 		cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
1307 	else
1308 		cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
1309 	return false;
1310 }
1311 
1312 static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
1313 {
1314 	struct online_data od = {
1315 		.cpu = cpu,
1316 		.online = online
1317 	};
1318 
1319 	rcu_read_lock();
1320 	io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
1321 	rcu_read_unlock();
1322 	return 0;
1323 }
1324 
1325 static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
1326 {
1327 	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1328 
1329 	return __io_wq_cpu_online(wq, cpu, true);
1330 }
1331 
1332 static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
1333 {
1334 	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1335 
1336 	return __io_wq_cpu_online(wq, cpu, false);
1337 }
1338 
1339 int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
1340 {
1341 	cpumask_var_t allowed_mask;
1342 	int ret = 0;
1343 
1344 	if (!tctx || !tctx->io_wq)
1345 		return -EINVAL;
1346 
1347 	if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
1348 		return -ENOMEM;
1349 
1350 	rcu_read_lock();
1351 	cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask);
1352 	if (mask) {
1353 		if (cpumask_subset(mask, allowed_mask))
1354 			cpumask_copy(tctx->io_wq->cpu_mask, mask);
1355 		else
1356 			ret = -EINVAL;
1357 	} else {
1358 		cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask);
1359 	}
1360 	rcu_read_unlock();
1361 
1362 	free_cpumask_var(allowed_mask);
1363 	return ret;
1364 }
1365 
1366 /*
1367  * Set max number of unbounded workers, returns old value. If new_count is 0,
1368  * then just return the old value.
1369  */
1370 int io_wq_max_workers(struct io_wq *wq, int *new_count)
1371 {
1372 	struct io_wq_acct *acct;
1373 	int prev[IO_WQ_ACCT_NR];
1374 	int i;
1375 
1376 	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
1377 	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
1378 	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);
1379 
1380 	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1381 		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
1382 			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
1383 	}
1384 
1385 	for (i = 0; i < IO_WQ_ACCT_NR; i++)
1386 		prev[i] = 0;
1387 
1388 	rcu_read_lock();
1389 
1390 	raw_spin_lock(&wq->lock);
1391 	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1392 		acct = &wq->acct[i];
1393 		prev[i] = max_t(int, acct->max_workers, prev[i]);
1394 		if (new_count[i])
1395 			acct->max_workers = new_count[i];
1396 	}
1397 	raw_spin_unlock(&wq->lock);
1398 	rcu_read_unlock();
1399 
1400 	for (i = 0; i < IO_WQ_ACCT_NR; i++)
1401 		new_count[i] = prev[i];
1402 
1403 	return 0;
1404 }
1405 
1406 static __init int io_wq_init(void)
1407 {
1408 	int ret;
1409 
1410 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
1411 					io_wq_cpu_online, io_wq_cpu_offline);
1412 	if (ret < 0)
1413 		return ret;
1414 	io_wq_online = ret;
1415 	return 0;
1416 }
1417 subsys_initcall(io_wq_init);
1418