xref: /openbmc/linux/io_uring/sqpoll.c (revision 01ad0576)
// SPDX-License-Identifier: GPL-2.0
/*
 * Contains the core logic for submission side polling of the SQ ring,
 * offloading submissions from the application to a kernel thread.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"

#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

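/*
 * Drop our park request and release sqd->lock. If other tasks still have
 * park requests pending, leave IO_SQ_THREAD_SHOULD_PARK set so the SQPOLL
 * thread parks again on their behalf.
 */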
void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Clear the bit unconditionally rather than using a conditional
	 * clear_bit(), which would race with other threads incrementing
	 * park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

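/*
 * Request that the SQPOLL thread parks: bump park_pending, set the PARK
 * bit and wake the thread, then return with sqd->lock held. The thread
 * cannot re-take the lock until io_sq_thread_unpark(), so the caller can
 * safely update shared SQPOLL state (e.g. the ctx list).
 */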
void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

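/*
 * Tell the SQPOLL thread to exit and wait until it has done so. May only
 * be called once per io_sq_data, and never from the thread itself.
 */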
void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

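/*
 * Drop a reference on the io_sq_data; the final put stops the SQPOLL
 * thread and frees the structure.
 */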
void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

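/*
 * Recompute the idle period of the shared SQPOLL thread as the maximum
 * sq_thread_idle over all attached rings.
 */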
static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}

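/*
 * Detach a ring from its io_sq_data: park the thread, unlink the ctx,
 * refresh the idle time and drop the ctx's reference on the sq_data.
 */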
void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

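/*
 * For IORING_SETUP_ATTACH_WQ: look up the io_uring instance behind
 * p->wq_fd and take a reference on its io_sq_data. Fails with -ENXIO for
 * a bad fd, -EINVAL if the fd is not an io_uring instance or has no
 * SQPOLL data, and -EPERM if it belongs to another thread group.
 */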
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(f.file)) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

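/*
 * Return the io_sq_data to use for this ring: attach to an existing one
 * if IORING_SETUP_ATTACH_WQ was requested (and permitted), otherwise
 * allocate and initialise a fresh one. *attached tells the caller which
 * of the two happened.
 */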
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for the EPERM case and set up a new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}

static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}

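/*
 * Run one round of SQPOLL work for a single ring: reap any pending iopoll
 * completions and submit up to 'to_submit' SQEs under the ring's
 * uring_lock, using the credentials the ring was set up with. When more
 * than one ring shares the thread, submissions are capped at
 * IORING_SQPOLL_CAP_ENTRIES_VALUE per round for fairness. Returns the
 * number of SQEs submitted.
 */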
static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/*
		 * Don't submit if refs are dying. This matters for
		 * io_uring_register(), and io_ring_exit_work() relies on
		 * it as well.
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}

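/*
 * Handle a park request or a pending signal: drop sqd->lock so the task
 * that requested the park (and is waiting on the lock) can make progress,
 * then re-take the lock once it unparks us. Returns true if the thread
 * should exit, i.e. a fatal signal was delivered or a stop was requested.
 */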
static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
		sqd->sq_cpu = raw_smp_processor_id();
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}

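/*
 * Main loop of the "iou-sqp-<pid>" kernel thread. It keeps submitting
 * SQEs and reaping iopoll completions for every attached ring, busy
 * polling for sq_thread_idle jiffies after the last activity. Once idle,
 * it sets IORING_SQ_NEED_WAKEUP in each ring's sq_flags and sleeps until
 * woken, either by new kernel-side work or by userspace calling
 * io_uring_enter() with IORING_ENTER_SQ_WAKEUP.
 *
 * For reference, the userspace side of that handshake is roughly the
 * following sketch (illustrative only; 'sq_flags' stands for the mapped
 * SQ ring flags field, io_uring_enter() for the io_uring_enter(2)
 * syscall, and a real implementation needs the appropriate memory
 * barrier before the flags load):
 *
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, to_submit, 0,
 *			       IORING_ENTER_SQ_WAKEUP, NULL);
 */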
static int io_sq_thread(void *data)
{
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	/* reset to our pid after we've set task_comm, for fdinfo */
	sqd->task_pid = current->pid;

	if (sqd->sq_cpu != -1) {
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	} else {
		set_cpus_allowed_ptr(current, cpu_online_mask);
		sqd->sq_cpu = raw_smp_processor_id();
	}

	/*
	 * Force the audit context to get set up, in case we do prep side
	 * async operations that would trigger an audit call before any
	 * issue side audit has been done.
	 */
	audit_uring_entry(IORING_OP_NOP);
	audit_uring_exit(true, 0);

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_run_task_work())
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			if (sqt_spin)
				timeout = jiffies + sqd->sq_thread_idle;
			if (unlikely(need_resched())) {
				mutex_unlock(&sqd->lock);
				cond_resched();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
						&ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}

				/*
				 * Ensure the store of the wakeup flag is not
				 * reordered with the load of the SQ tail
				 */
				smp_mb__after_atomic();

				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
						&ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);

	complete(&sqd->exited);
	do_exit(0);
}

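/*
 * Called when the SQ ring is full (the IORING_ENTER_SQ_WAIT path of
 * io_uring_enter()): sleep until the SQPOLL thread has consumed entries
 * and made room in the ring, or until a signal is pending.
 */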
void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
}

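/*
 * Called during ring setup when IORING_SETUP_SQPOLL (and/or
 * IORING_SETUP_ATTACH_WQ) is requested: find or create the io_sq_data,
 * attach this ctx to it, apply the idle time and CPU affinity settings,
 * and spawn the SQPOLL kernel thread unless we attached to an existing
 * one.
 *
 * For context, the userspace side is just a matter of setup parameters;
 * a minimal sketch using liburing (illustrative, not part of this file):
 *
 *	struct io_uring ring;
 *	struct io_uring_params p = { };
 *
 *	p.flags = IORING_SETUP_SQPOLL;
 *	p.sq_thread_idle = 2000;	// idle time in milliseconds
 *	io_uring_queue_init_params(64, &ring, &p);
 */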
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (!io_is_uring_fops(f.file)) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			cpumask_var_t allowed_mask;
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			ret = -ENOMEM;
			if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
				goto err_sqpoll;
			ret = -EINVAL;
			cpuset_cpus_allowed(current, allowed_mask);
			if (!cpumask_test_cpu(cpu, allowed_mask)) {
				free_cpumask_var(allowed_mask);
				goto err_sqpoll;
			}
			free_cpumask_var(allowed_mask);
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}

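/*
 * Apply an io-wq CPU affinity mask on behalf of an SQPOLL ring, reached
 * via io_uring_register() with IORING_REGISTER_IOWQ_AFF when the ring
 * uses IORING_SETUP_SQPOLL. The thread is parked so sqd->thread stays
 * stable while the affinity is applied.
 */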
__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
				     cpumask_var_t mask)
{
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;

	if (sqd) {
		io_sq_thread_park(sqd);
		/* Don't set affinity for a dying thread */
		if (sqd->thread)
			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
		io_sq_thread_unpark(sqd);
	}

	return ret;
}