xref: /openbmc/linux/io_uring/io_uring.c (revision 724ba675)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Shared application/kernel submission and completion ring pairs, for
4  * supporting fast/efficient IO.
5  *
6  * A note on the read/write ordering memory barriers that are matched between
7  * the application and kernel side.
8  *
9  * After the application reads the CQ ring tail, it must use an
10  * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11  * before writing the tail (using smp_load_acquire to read the tail will
12  * do). It also needs a smp_mb() before updating CQ head (ordering the
13  * entry load(s) with the head store), pairing with an implicit barrier
14  * through a control-dependency in io_get_cqe (smp_store_release to
15  * store head will do). Failure to do so could lead to reading invalid
16  * CQ entries.
17  *
18  * Likewise, the application must use an appropriate smp_wmb() before
19  * writing the SQ tail (ordering SQ entry stores with the tail store),
20  * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21  * to store the tail will do). And it needs a barrier ordering the SQ
22  * head load before writing new SQ entries (smp_load_acquire to read
23  * head will do).
24  *
25  * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26  * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27  * updating the SQ tail; a full memory barrier smp_mb() is needed
28  * between.
29  *
30  * Also see the examples in the liburing library:
31  *
32  *	git://git.kernel.dk/liburing
33  *
34  * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35  * from data shared between the kernel and application. This is done both
36  * for ordering purposes, but also to ensure that once a value is loaded from
37  * data that the application could potentially modify, it remains stable.
38  *
39  * Copyright (C) 2018-2019 Jens Axboe
40  * Copyright (c) 2018-2019 Christoph Hellwig
41  */
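
/*
 * For illustration, a sketch of the userspace side of the CQ pairing
 * described above, roughly as liburing implements it. The local names
 * (cq_head, cq_tail, cq_mask, cqes) stand for the mmap'ed ring fields
 * and are not kernel API:
 *
 *	unsigned head = *cq_head;
 *	// acquire: pairs with the kernel's store-release of the CQ tail
 *	unsigned tail = smp_load_acquire(cq_tail);
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *		consume(cqe);
 *		head++;
 *	}
 *	// release: orders the CQE loads before publishing the new head
 *	smp_store_release(cq_head, head);
 */
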
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <net/compat.h>
47 #include <linux/refcount.h>
48 #include <linux/uio.h>
49 #include <linux/bits.h>
50 
51 #include <linux/sched/signal.h>
52 #include <linux/fs.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
55 #include <linux/mm.h>
56 #include <linux/mman.h>
57 #include <linux/percpu.h>
58 #include <linux/slab.h>
59 #include <linux/bvec.h>
60 #include <linux/net.h>
61 #include <net/sock.h>
62 #include <net/af_unix.h>
63 #include <net/scm.h>
64 #include <linux/anon_inodes.h>
65 #include <linux/sched/mm.h>
66 #include <linux/uaccess.h>
67 #include <linux/nospec.h>
68 #include <linux/highmem.h>
69 #include <linux/fsnotify.h>
70 #include <linux/fadvise.h>
71 #include <linux/task_work.h>
72 #include <linux/io_uring.h>
73 #include <linux/audit.h>
74 #include <linux/security.h>
75 #include <asm/shmparam.h>
76 
77 #define CREATE_TRACE_POINTS
78 #include <trace/events/io_uring.h>
79 
80 #include <uapi/linux/io_uring.h>
81 
82 #include "io-wq.h"
83 
84 #include "io_uring.h"
85 #include "opdef.h"
86 #include "refs.h"
87 #include "tctx.h"
88 #include "sqpoll.h"
89 #include "fdinfo.h"
90 #include "kbuf.h"
91 #include "rsrc.h"
92 #include "cancel.h"
93 #include "net.h"
94 #include "notif.h"
95 
96 #include "timeout.h"
97 #include "poll.h"
98 #include "alloc_cache.h"
99 
100 #define IORING_MAX_ENTRIES	32768
101 #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
102 
103 #define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
104 				 IORING_REGISTER_LAST + IORING_OP_LAST)
105 
106 #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
107 			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)
108 
109 #define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
110 			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
111 
112 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
113 				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
114 				REQ_F_ASYNC_DATA)
115 
116 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
117 				 IO_REQ_CLEAN_FLAGS)
118 
119 #define IO_TCTX_REFS_CACHE_NR	(1U << 10)
120 
121 #define IO_COMPL_BATCH			32
122 #define IO_REQ_ALLOC_BATCH		8
123 
124 enum {
125 	IO_CHECK_CQ_OVERFLOW_BIT,
126 	IO_CHECK_CQ_DROPPED_BIT,
127 };
128 
129 enum {
130 	IO_EVENTFD_OP_SIGNAL_BIT,
131 	IO_EVENTFD_OP_FREE_BIT,
132 };
133 
134 struct io_defer_entry {
135 	struct list_head	list;
136 	struct io_kiocb		*req;
137 	u32			seq;
138 };
139 
140 /* requests with any of these flags set should undergo io_disarm_next() */
141 #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
142 #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
143 
144 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
145 					 struct task_struct *task,
146 					 bool cancel_all);
147 
148 static void io_dismantle_req(struct io_kiocb *req);
149 static void io_clean_op(struct io_kiocb *req);
150 static void io_queue_sqe(struct io_kiocb *req);
151 static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
152 static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
153 static __cold void io_fallback_tw(struct io_uring_task *tctx);
154 
155 struct kmem_cache *req_cachep;
156 
157 struct sock *io_uring_get_socket(struct file *file)
158 {
159 #if defined(CONFIG_UNIX)
160 	if (io_is_uring_fops(file)) {
161 		struct io_ring_ctx *ctx = file->private_data;
162 
163 		return ctx->ring_sock->sk;
164 	}
165 #endif
166 	return NULL;
167 }
168 EXPORT_SYMBOL(io_uring_get_socket);
169 
170 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
171 {
172 	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
173 	    ctx->submit_state.cqes_count)
174 		__io_submit_flush_completions(ctx);
175 }
176 
177 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
178 {
179 	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
180 }
181 
182 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
183 {
184 	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
185 }
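
/*
 * Both helpers rely on head/tail being free-running u32 counters, so the
 * unsigned subtraction stays correct across wraparound: e.g. tail ==
 * 0x00000002 and head == 0xfffffffe yields 4 pending events.
 */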
186 
187 static bool io_match_linked(struct io_kiocb *head)
188 {
189 	struct io_kiocb *req;
190 
191 	io_for_each_link(req, head) {
192 		if (req->flags & REQ_F_INFLIGHT)
193 			return true;
194 	}
195 	return false;
196 }
197 
198 /*
199  * As io_match_task() but protected against racing with linked timeouts.
200  * User must not hold timeout_lock.
201  */
202 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
203 			bool cancel_all)
204 {
205 	bool matched;
206 
207 	if (task && head->task != task)
208 		return false;
209 	if (cancel_all)
210 		return true;
211 
212 	if (head->flags & REQ_F_LINK_TIMEOUT) {
213 		struct io_ring_ctx *ctx = head->ctx;
214 
215 		/* protect against races with linked timeouts */
216 		spin_lock_irq(&ctx->timeout_lock);
217 		matched = io_match_linked(head);
218 		spin_unlock_irq(&ctx->timeout_lock);
219 	} else {
220 		matched = io_match_linked(head);
221 	}
222 	return matched;
223 }
224 
225 static inline void req_fail_link_node(struct io_kiocb *req, int res)
226 {
227 	req_set_fail(req);
228 	io_req_set_res(req, res, 0);
229 }
230 
231 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
232 {
233 	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
234 	kasan_poison_object_data(req_cachep, req);
235 }
236 
237 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
238 {
239 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
240 
241 	complete(&ctx->ref_comp);
242 }
243 
244 static __cold void io_fallback_req_func(struct work_struct *work)
245 {
246 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
247 						fallback_work.work);
248 	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
249 	struct io_kiocb *req, *tmp;
250 	struct io_tw_state ts = { .locked = true, };
251 
252 	mutex_lock(&ctx->uring_lock);
253 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
254 		req->io_task_work.func(req, &ts);
255 	if (WARN_ON_ONCE(!ts.locked))
256 		return;
257 	io_submit_flush_completions(ctx);
258 	mutex_unlock(&ctx->uring_lock);
259 }
260 
261 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
262 {
263 	unsigned hash_buckets = 1U << bits;
264 	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
265 
266 	table->hbs = kmalloc(hash_size, GFP_KERNEL);
267 	if (!table->hbs)
268 		return -ENOMEM;
269 
270 	table->hash_bits = bits;
271 	init_hash_table(table, hash_buckets);
272 	return 0;
273 }
274 
275 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
276 {
277 	struct io_ring_ctx *ctx;
278 	int hash_bits;
279 
280 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
281 	if (!ctx)
282 		return NULL;
283 
284 	xa_init(&ctx->io_bl_xa);
285 
286 	/*
287 	 * Use 5 bits less than the max CQ entries; that should give us around
288 	 * 32 entries per hash list if totally full and uniformly spread (e.g.
289 	 * 4096 CQ entries -> 128 buckets), capping buckets to not overconsume memory.
290 	 */
291 	hash_bits = ilog2(p->cq_entries) - 5;
292 	hash_bits = clamp(hash_bits, 1, 8);
293 	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
294 		goto err;
295 	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
296 		goto err;
297 
298 	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
299 	if (!ctx->dummy_ubuf)
300 		goto err;
301 	/* set an invalid range so io_import_fixed() fails when it meets it */
302 	ctx->dummy_ubuf->ubuf = -1UL;
303 
304 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
305 			    0, GFP_KERNEL))
306 		goto err;
307 
308 	ctx->flags = p->flags;
309 	init_waitqueue_head(&ctx->sqo_sq_wait);
310 	INIT_LIST_HEAD(&ctx->sqd_list);
311 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
312 	INIT_LIST_HEAD(&ctx->io_buffers_cache);
313 	io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
314 			    sizeof(struct io_rsrc_node));
315 	io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
316 			    sizeof(struct async_poll));
317 	io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
318 			    sizeof(struct io_async_msghdr));
319 	init_completion(&ctx->ref_comp);
320 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
321 	mutex_init(&ctx->uring_lock);
322 	init_waitqueue_head(&ctx->cq_wait);
323 	init_waitqueue_head(&ctx->poll_wq);
324 	init_waitqueue_head(&ctx->rsrc_quiesce_wq);
325 	spin_lock_init(&ctx->completion_lock);
326 	spin_lock_init(&ctx->timeout_lock);
327 	INIT_WQ_LIST(&ctx->iopoll_list);
328 	INIT_LIST_HEAD(&ctx->io_buffers_pages);
329 	INIT_LIST_HEAD(&ctx->io_buffers_comp);
330 	INIT_LIST_HEAD(&ctx->defer_list);
331 	INIT_LIST_HEAD(&ctx->timeout_list);
332 	INIT_LIST_HEAD(&ctx->ltimeout_list);
333 	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
334 	init_llist_head(&ctx->work_llist);
335 	INIT_LIST_HEAD(&ctx->tctx_list);
336 	ctx->submit_state.free_list.next = NULL;
337 	INIT_WQ_LIST(&ctx->locked_free_list);
338 	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
339 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
340 	return ctx;
341 err:
342 	kfree(ctx->dummy_ubuf);
343 	kfree(ctx->cancel_table.hbs);
344 	kfree(ctx->cancel_table_locked.hbs);
345 	kfree(ctx->io_bl);
346 	xa_destroy(&ctx->io_bl_xa);
347 	kfree(ctx);
348 	return NULL;
349 }
350 
351 static void io_account_cq_overflow(struct io_ring_ctx *ctx)
352 {
353 	struct io_rings *r = ctx->rings;
354 
355 	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
356 	ctx->cq_extra--;
357 }
358 
359 static bool req_need_defer(struct io_kiocb *req, u32 seq)
360 {
361 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
362 		struct io_ring_ctx *ctx = req->ctx;
363 
364 		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
365 	}
366 
367 	return false;
368 }
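
/*
 * Drain example: a request marked IOSQE_IO_DRAIN stays deferred until the
 * CQ tail (adjusted by cq_extra, which accounts for CQEs that don't map
 * to an SQE) catches up with the request's submit-time sequence, i.e.
 * until every earlier submission has posted its completion.
 */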
369 
370 static inline void io_req_track_inflight(struct io_kiocb *req)
371 {
372 	if (!(req->flags & REQ_F_INFLIGHT)) {
373 		req->flags |= REQ_F_INFLIGHT;
374 		atomic_inc(&req->task->io_uring->inflight_tracked);
375 	}
376 }
377 
378 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
379 {
380 	if (WARN_ON_ONCE(!req->link))
381 		return NULL;
382 
383 	req->flags &= ~REQ_F_ARM_LTIMEOUT;
384 	req->flags |= REQ_F_LINK_TIMEOUT;
385 
386 	/* linked timeouts should have two refs once prep'ed */
387 	io_req_set_refcount(req);
388 	__io_req_set_refcount(req->link, 2);
389 	return req->link;
390 }
391 
392 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
393 {
394 	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
395 		return NULL;
396 	return __io_prep_linked_timeout(req);
397 }
398 
399 static noinline void __io_arm_ltimeout(struct io_kiocb *req)
400 {
401 	io_queue_linked_timeout(__io_prep_linked_timeout(req));
402 }
403 
404 static inline void io_arm_ltimeout(struct io_kiocb *req)
405 {
406 	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
407 		__io_arm_ltimeout(req);
408 }
409 
410 static void io_prep_async_work(struct io_kiocb *req)
411 {
412 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
413 	struct io_ring_ctx *ctx = req->ctx;
414 
415 	if (!(req->flags & REQ_F_CREDS)) {
416 		req->flags |= REQ_F_CREDS;
417 		req->creds = get_current_cred();
418 	}
419 
420 	req->work.list.next = NULL;
421 	req->work.flags = 0;
422 	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
423 	if (req->flags & REQ_F_FORCE_ASYNC)
424 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
425 
426 	if (req->file && !io_req_ffs_set(req))
427 		req->flags |= io_file_get_flags(req->file) << REQ_F_SUPPORT_NOWAIT_BIT;
428 
429 	if (req->file && (req->flags & REQ_F_ISREG)) {
430 		bool should_hash = def->hash_reg_file;
431 
432 		/* don't serialize this request if the fs doesn't need it */
433 		if (should_hash && (req->file->f_flags & O_DIRECT) &&
434 		    (req->file->f_mode & FMODE_DIO_PARALLEL_WRITE))
435 			should_hash = false;
436 		if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
437 			io_wq_hash_work(&req->work, file_inode(req->file));
438 	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
439 		if (def->unbound_nonreg_file)
440 			req->work.flags |= IO_WQ_WORK_UNBOUND;
441 	}
442 }
443 
444 static void io_prep_async_link(struct io_kiocb *req)
445 {
446 	struct io_kiocb *cur;
447 
448 	if (req->flags & REQ_F_LINK_TIMEOUT) {
449 		struct io_ring_ctx *ctx = req->ctx;
450 
451 		spin_lock_irq(&ctx->timeout_lock);
452 		io_for_each_link(cur, req)
453 			io_prep_async_work(cur);
454 		spin_unlock_irq(&ctx->timeout_lock);
455 	} else {
456 		io_for_each_link(cur, req)
457 			io_prep_async_work(cur);
458 	}
459 }
460 
461 void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
462 {
463 	struct io_kiocb *link = io_prep_linked_timeout(req);
464 	struct io_uring_task *tctx = req->task->io_uring;
465 
466 	BUG_ON(!tctx);
467 	BUG_ON(!tctx->io_wq);
468 
469 	/* init ->work of the whole link before punting */
470 	io_prep_async_link(req);
471 
472 	/*
473 	 * Not expected to happen, but if we do have a bug where this _can_
474 	 * happen, catch it here and ensure the request is marked as
475 	 * canceled. That will make io-wq go through the usual work cancel
476 	 * procedure rather than attempt to run this request (or create a new
477 	 * worker for it).
478 	 */
479 	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
480 		req->work.flags |= IO_WQ_WORK_CANCEL;
481 
482 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
483 	io_wq_enqueue(tctx->io_wq, &req->work);
484 	if (link)
485 		io_queue_linked_timeout(link);
486 }
487 
488 static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
489 {
490 	while (!list_empty(&ctx->defer_list)) {
491 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
492 						struct io_defer_entry, list);
493 
494 		if (req_need_defer(de->req, de->seq))
495 			break;
496 		list_del_init(&de->list);
497 		io_req_task_queue(de->req);
498 		kfree(de);
499 	}
500 }
501 
502 
503 static void io_eventfd_ops(struct rcu_head *rcu)
504 {
505 	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
506 	int ops = atomic_xchg(&ev_fd->ops, 0);
507 
508 	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
509 		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
510 
511 	/* IO_EVENTFD_OP_FREE_BIT may not be set here, depending on callback
512 	 * ordering in a race, but if the reference count is 0 we know we
513 	 * have to free it regardless.
514 	 */
515 	if (atomic_dec_and_test(&ev_fd->refs)) {
516 		eventfd_ctx_put(ev_fd->cq_ev_fd);
517 		kfree(ev_fd);
518 	}
519 }
520 
521 static void io_eventfd_signal(struct io_ring_ctx *ctx)
522 {
523 	struct io_ev_fd *ev_fd = NULL;
524 
525 	rcu_read_lock();
526 	/*
527 	 * rcu_dereference ctx->io_ev_fd once and use it for both checking
528 	 * and eventfd_signal
529 	 */
530 	ev_fd = rcu_dereference(ctx->io_ev_fd);
531 
532 	/*
533 	 * Check again if ev_fd exists in case an io_eventfd_unregister call
534 	 * completed between the NULL check of ctx->io_ev_fd at the start of
535 	 * the function and rcu_read_lock.
536 	 */
537 	if (unlikely(!ev_fd))
538 		goto out;
539 	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
540 		goto out;
541 	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
542 		goto out;
543 
544 	if (likely(eventfd_signal_allowed())) {
545 		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
546 	} else {
547 		atomic_inc(&ev_fd->refs);
548 		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
549 			call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
550 		else
551 			atomic_dec(&ev_fd->refs);
552 	}
553 
554 out:
555 	rcu_read_unlock();
556 }
557 
558 static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
559 {
560 	bool skip;
561 
562 	spin_lock(&ctx->completion_lock);
563 
564 	/*
565 	 * Eventfd should only get triggered when at least one event has been
566 	 * posted. Some applications rely on the eventfd notification count
567 	 * only changing IFF a new CQE has been added to the CQ ring. There's
568 	 * no dependency on a 1:1 relationship between how many times this
569 	 * function is called (and hence the eventfd count) and the number of
570 	 * CQEs posted to the CQ ring.
571 	 */
572 	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
573 	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
574 	spin_unlock(&ctx->completion_lock);
575 	if (skip)
576 		return;
577 
578 	io_eventfd_signal(ctx);
579 }
580 
581 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
582 {
583 	if (ctx->poll_activated)
584 		io_poll_wq_wake(ctx);
585 	if (ctx->off_timeout_used)
586 		io_flush_timeouts(ctx);
587 	if (ctx->drain_active) {
588 		spin_lock(&ctx->completion_lock);
589 		io_queue_deferred(ctx);
590 		spin_unlock(&ctx->completion_lock);
591 	}
592 	if (ctx->has_evfd)
593 		io_eventfd_flush_signal(ctx);
594 }
595 
596 static inline void __io_cq_lock(struct io_ring_ctx *ctx)
597 	__acquires(ctx->completion_lock)
598 {
599 	if (!ctx->task_complete)
600 		spin_lock(&ctx->completion_lock);
601 }
602 
603 static inline void __io_cq_unlock(struct io_ring_ctx *ctx)
604 {
605 	if (!ctx->task_complete)
606 		spin_unlock(&ctx->completion_lock);
607 }
608 
609 static inline void io_cq_lock(struct io_ring_ctx *ctx)
610 	__acquires(ctx->completion_lock)
611 {
612 	spin_lock(&ctx->completion_lock);
613 }
614 
615 static inline void io_cq_unlock(struct io_ring_ctx *ctx)
616 	__releases(ctx->completion_lock)
617 {
618 	spin_unlock(&ctx->completion_lock);
619 }
620 
621 /* keep it inlined for io_submit_flush_completions() */
622 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
623 	__releases(ctx->completion_lock)
624 {
625 	io_commit_cqring(ctx);
626 	__io_cq_unlock(ctx);
627 	io_commit_cqring_flush(ctx);
628 	io_cqring_wake(ctx);
629 }
630 
631 static void __io_cq_unlock_post_flush(struct io_ring_ctx *ctx)
632 	__releases(ctx->completion_lock)
633 {
634 	io_commit_cqring(ctx);
635 
636 	if (ctx->task_complete) {
637 		/*
638 		 * ->task_complete implies that only current might be waiting
639 		 * for CQEs, and obviously, we currently don't. No one is
640 		 * waiting, wakeups are futile, skip them.
641 		 */
642 		io_commit_cqring_flush(ctx);
643 	} else {
644 		__io_cq_unlock(ctx);
645 		io_commit_cqring_flush(ctx);
646 		io_cqring_wake(ctx);
647 	}
648 }
649 
650 void io_cq_unlock_post(struct io_ring_ctx *ctx)
651 	__releases(ctx->completion_lock)
652 {
653 	io_commit_cqring(ctx);
654 	spin_unlock(&ctx->completion_lock);
655 	io_commit_cqring_flush(ctx);
656 	io_cqring_wake(ctx);
657 }
658 
659 /* Drop all backlogged overflow entries without posting them to the CQ */
660 static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
661 {
662 	struct io_overflow_cqe *ocqe;
663 	LIST_HEAD(list);
664 
665 	io_cq_lock(ctx);
666 	list_splice_init(&ctx->cq_overflow_list, &list);
667 	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
668 	io_cq_unlock(ctx);
669 
670 	while (!list_empty(&list)) {
671 		ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
672 		list_del(&ocqe->list);
673 		kfree(ocqe);
674 	}
675 }
676 
677 static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
678 {
679 	size_t cqe_size = sizeof(struct io_uring_cqe);
680 
681 	if (__io_cqring_events(ctx) == ctx->cq_entries)
682 		return;
683 
684 	if (ctx->flags & IORING_SETUP_CQE32)
685 		cqe_size <<= 1;
686 
687 	io_cq_lock(ctx);
688 	while (!list_empty(&ctx->cq_overflow_list)) {
689 		struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
690 		struct io_overflow_cqe *ocqe;
691 
692 		if (!cqe)
693 			break;
694 		ocqe = list_first_entry(&ctx->cq_overflow_list,
695 					struct io_overflow_cqe, list);
696 		memcpy(cqe, &ocqe->cqe, cqe_size);
697 		list_del(&ocqe->list);
698 		kfree(ocqe);
699 	}
700 
701 	if (list_empty(&ctx->cq_overflow_list)) {
702 		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
703 		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
704 	}
705 	io_cq_unlock_post(ctx);
706 }
707 
708 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
709 {
710 	/* iopoll syncs against uring_lock, not completion_lock */
711 	if (ctx->flags & IORING_SETUP_IOPOLL)
712 		mutex_lock(&ctx->uring_lock);
713 	__io_cqring_overflow_flush(ctx);
714 	if (ctx->flags & IORING_SETUP_IOPOLL)
715 		mutex_unlock(&ctx->uring_lock);
716 }
717 
718 static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
719 {
720 	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
721 		io_cqring_do_overflow_flush(ctx);
722 }
723 
724 /* can be called by any task */
725 static void io_put_task_remote(struct task_struct *task, int nr)
726 {
727 	struct io_uring_task *tctx = task->io_uring;
728 
729 	percpu_counter_sub(&tctx->inflight, nr);
730 	if (unlikely(atomic_read(&tctx->in_cancel)))
731 		wake_up(&tctx->wait);
732 	put_task_struct_many(task, nr);
733 }
734 
735 /* used by a task to put its own references */
736 static void io_put_task_local(struct task_struct *task, int nr)
737 {
738 	task->io_uring->cached_refs += nr;
739 }
740 
741 /* must be called shortly after putting a request */
742 static inline void io_put_task(struct task_struct *task, int nr)
743 {
744 	if (likely(task == current))
745 		io_put_task_local(task, nr);
746 	else
747 		io_put_task_remote(task, nr);
748 }
749 
750 void io_task_refs_refill(struct io_uring_task *tctx)
751 {
752 	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
753 
754 	percpu_counter_add(&tctx->inflight, refill);
755 	refcount_add(refill, &current->usage);
756 	tctx->cached_refs += refill;
757 }
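
/*
 * Refill arithmetic example: if the cache was overdrawn to a cached_refs
 * of -3, refill == 1024 + 3 == 1027 references are taken in one go and
 * the cache lands back exactly at IO_TCTX_REFS_CACHE_NR.
 */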
758 
759 static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
760 {
761 	struct io_uring_task *tctx = task->io_uring;
762 	unsigned int refs = tctx->cached_refs;
763 
764 	if (refs) {
765 		tctx->cached_refs = 0;
766 		percpu_counter_sub(&tctx->inflight, refs);
767 		put_task_struct_many(task, refs);
768 	}
769 }
770 
771 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
772 				     s32 res, u32 cflags, u64 extra1, u64 extra2)
773 {
774 	struct io_overflow_cqe *ocqe;
775 	size_t ocq_size = sizeof(struct io_overflow_cqe);
776 	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
777 
778 	lockdep_assert_held(&ctx->completion_lock);
779 
780 	if (is_cqe32)
781 		ocq_size += sizeof(struct io_uring_cqe);
782 
783 	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
784 	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
785 	if (!ocqe) {
786 		/*
787 		 * If we're in ring overflow flush mode, or in task cancel mode,
788 		 * or cannot allocate an overflow entry, then we need to drop it
789 		 * on the floor.
790 		 */
791 		io_account_cq_overflow(ctx);
792 		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
793 		return false;
794 	}
795 	if (list_empty(&ctx->cq_overflow_list)) {
796 		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
797 		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
798 
799 	}
800 	ocqe->cqe.user_data = user_data;
801 	ocqe->cqe.res = res;
802 	ocqe->cqe.flags = cflags;
803 	if (is_cqe32) {
804 		ocqe->cqe.big_cqe[0] = extra1;
805 		ocqe->cqe.big_cqe[1] = extra2;
806 	}
807 	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
808 	return true;
809 }
810 
811 bool io_req_cqe_overflow(struct io_kiocb *req)
812 {
813 	if (!(req->flags & REQ_F_CQE32_INIT)) {
814 		req->extra1 = 0;
815 		req->extra2 = 0;
816 	}
817 	return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
818 					req->cqe.res, req->cqe.flags,
819 					req->extra1, req->extra2);
820 }
821 
822 /*
823  * writes to the cq entry need to come after reading head; the
824  * control dependency is enough as we're using WRITE_ONCE to
825  * fill the cq entry
826  */
827 struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
828 {
829 	struct io_rings *rings = ctx->rings;
830 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
831 	unsigned int free, queued, len;
832 
833 	/*
834 	 * Posting into the CQ when there are pending overflowed CQEs may break
835 	 * ordering guarantees, which will affect links, F_MORE users and more.
836 	 * Force the completion through the overflow path instead.
837 	 */
838 	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
839 		return NULL;
840 
841 	/* userspace may cheat by modifying the tail; be safe and take the min */
842 	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
843 	free = ctx->cq_entries - queued;
844 	/* we need a contiguous range, limit based on the current array offset */
845 	len = min(free, ctx->cq_entries - off);
846 	if (!len)
847 		return NULL;
848 
849 	if (ctx->flags & IORING_SETUP_CQE32) {
850 		off <<= 1;
851 		len <<= 1;
852 	}
853 
854 	ctx->cqe_cached = &rings->cqes[off];
855 	ctx->cqe_sentinel = ctx->cqe_cached + len;
856 
857 	ctx->cached_cq_tail++;
858 	ctx->cqe_cached++;
859 	if (ctx->flags & IORING_SETUP_CQE32)
860 		ctx->cqe_cached++;
861 	return &rings->cqes[off];
862 }
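
/*
 * Index arithmetic example for the CQE32 case above: rings->cqes[] is
 * sized in normal 16-byte CQE slots and a big CQE occupies two of them.
 * With cq_entries == 8 and cached_cq_tail == 5, off is 5 for normal CQEs
 * but doubles to 10 under IORING_SETUP_CQE32, which is also why
 * cqe_cached advances twice per posted entry in that mode.
 */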
863 
864 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
865 			      u32 cflags)
866 {
867 	struct io_uring_cqe *cqe;
868 
869 	ctx->cq_extra++;
870 
871 	/*
872 	 * If we can't get a cq entry, userspace overflowed the
873 	 * submission (by quite a lot). Increment the overflow count in
874 	 * the ring.
875 	 */
876 	cqe = io_get_cqe(ctx);
877 	if (likely(cqe)) {
878 		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
879 
880 		WRITE_ONCE(cqe->user_data, user_data);
881 		WRITE_ONCE(cqe->res, res);
882 		WRITE_ONCE(cqe->flags, cflags);
883 
884 		if (ctx->flags & IORING_SETUP_CQE32) {
885 			WRITE_ONCE(cqe->big_cqe[0], 0);
886 			WRITE_ONCE(cqe->big_cqe[1], 0);
887 		}
888 		return true;
889 	}
890 	return false;
891 }
892 
893 static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
894 	__must_hold(&ctx->uring_lock)
895 {
896 	struct io_submit_state *state = &ctx->submit_state;
897 	unsigned int i;
898 
899 	lockdep_assert_held(&ctx->uring_lock);
900 	for (i = 0; i < state->cqes_count; i++) {
901 		struct io_uring_cqe *cqe = &state->cqes[i];
902 
903 		if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
904 			if (ctx->task_complete) {
905 				spin_lock(&ctx->completion_lock);
906 				io_cqring_event_overflow(ctx, cqe->user_data,
907 							cqe->res, cqe->flags, 0, 0);
908 				spin_unlock(&ctx->completion_lock);
909 			} else {
910 				io_cqring_event_overflow(ctx, cqe->user_data,
911 							cqe->res, cqe->flags, 0, 0);
912 			}
913 		}
914 	}
915 	state->cqes_count = 0;
916 }
917 
918 static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
919 			      bool allow_overflow)
920 {
921 	bool filled;
922 
923 	io_cq_lock(ctx);
924 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
925 	if (!filled && allow_overflow)
926 		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
927 
928 	io_cq_unlock_post(ctx);
929 	return filled;
930 }
931 
932 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
933 {
934 	return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
935 }
936 
937 bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
938 		bool allow_overflow)
939 {
940 	struct io_uring_cqe *cqe;
941 	unsigned int length;
942 
943 	if (!defer)
944 		return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
945 
946 	length = ARRAY_SIZE(ctx->submit_state.cqes);
947 
948 	lockdep_assert_held(&ctx->uring_lock);
949 
950 	if (ctx->submit_state.cqes_count == length) {
951 		__io_cq_lock(ctx);
952 		__io_flush_post_cqes(ctx);
953 		/* no need to flush - flush is deferred */
954 		__io_cq_unlock_post(ctx);
955 	}
956 
957 	/* For deferred completions this is not as strict as it is otherwise,
958 	 * however its main job is to prevent unbounded posted completions,
959 	 * and in that it works just as well.
960 	 */
961 	if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
962 		return false;
963 
964 	cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
965 	cqe->user_data = user_data;
966 	cqe->res = res;
967 	cqe->flags = cflags;
968 	return true;
969 }
970 
971 static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
972 {
973 	struct io_ring_ctx *ctx = req->ctx;
974 	struct io_rsrc_node *rsrc_node = NULL;
975 
976 	io_cq_lock(ctx);
977 	if (!(req->flags & REQ_F_CQE_SKIP))
978 		io_fill_cqe_req(ctx, req);
979 
980 	/*
981 	 * If we're the last reference to this request, add to our locked
982 	 * free_list cache.
983 	 */
984 	if (req_ref_put_and_test(req)) {
985 		if (req->flags & IO_REQ_LINK_FLAGS) {
986 			if (req->flags & IO_DISARM_MASK)
987 				io_disarm_next(req);
988 			if (req->link) {
989 				io_req_task_queue(req->link);
990 				req->link = NULL;
991 			}
992 		}
993 		io_put_kbuf_comp(req);
994 		io_dismantle_req(req);
995 		rsrc_node = req->rsrc_node;
996 		/*
997 		 * Selected buffer deallocation in io_clean_op() assumes that
998 		 * we don't hold ->completion_lock. Clean them here to avoid
999 		 * deadlocks.
1000 		 */
1001 		io_put_task_remote(req->task, 1);
1002 		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
1003 		ctx->locked_free_nr++;
1004 	}
1005 	io_cq_unlock_post(ctx);
1006 
1007 	if (rsrc_node) {
1008 		io_ring_submit_lock(ctx, issue_flags);
1009 		io_put_rsrc_node(ctx, rsrc_node);
1010 		io_ring_submit_unlock(ctx, issue_flags);
1011 	}
1012 }
1013 
1014 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
1015 {
1016 	if (req->ctx->task_complete && req->ctx->submitter_task != current) {
1017 		req->io_task_work.func = io_req_task_complete;
1018 		io_req_task_work_add(req);
1019 	} else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
1020 		   !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
1021 		__io_req_complete_post(req, issue_flags);
1022 	} else {
1023 		struct io_ring_ctx *ctx = req->ctx;
1024 
1025 		mutex_lock(&ctx->uring_lock);
1026 		__io_req_complete_post(req, issue_flags & ~IO_URING_F_UNLOCKED);
1027 		mutex_unlock(&ctx->uring_lock);
1028 	}
1029 }
1030 
1031 void io_req_defer_failed(struct io_kiocb *req, s32 res)
1032 	__must_hold(&ctx->uring_lock)
1033 {
1034 	const struct io_cold_def *def = &io_cold_defs[req->opcode];
1035 
1036 	lockdep_assert_held(&req->ctx->uring_lock);
1037 
1038 	req_set_fail(req);
1039 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
1040 	if (def->fail)
1041 		def->fail(req);
1042 	io_req_complete_defer(req);
1043 }
1044 
1045 /*
1046  * Don't initialise the fields below on every allocation, but do that in
1047  * advance and keep them valid across allocations.
1048  */
1049 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1050 {
1051 	req->ctx = ctx;
1052 	req->link = NULL;
1053 	req->async_data = NULL;
1054 	/* not necessary, but safer to zero */
1055 	req->cqe.res = 0;
1056 }
1057 
1058 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1059 					struct io_submit_state *state)
1060 {
1061 	spin_lock(&ctx->completion_lock);
1062 	wq_list_splice(&ctx->locked_free_list, &state->free_list);
1063 	ctx->locked_free_nr = 0;
1064 	spin_unlock(&ctx->completion_lock);
1065 }
1066 
1067 /*
1068  * A request might get retired back into the request caches even before opcode
1069  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1070  * Because of that, io_alloc_req() should be called only under ->uring_lock
1071  * and with extra caution to not get a request that is still worked on.
1072  */
1073 __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
1074 	__must_hold(&ctx->uring_lock)
1075 {
1076 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1077 	void *reqs[IO_REQ_ALLOC_BATCH];
1078 	int ret, i;
1079 
1080 	/*
1081 	 * If we have more than a batch's worth of requests in our IRQ side
1082 	 * locked cache, grab the lock and move them over to our submission
1083 	 * side cache.
1084 	 */
1085 	if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
1086 		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
1087 		if (!io_req_cache_empty(ctx))
1088 			return true;
1089 	}
1090 
1091 	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
1092 
1093 	/*
1094 	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1095 	 * retry single alloc to be on the safe side.
1096 	 */
1097 	if (unlikely(ret <= 0)) {
1098 		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1099 		if (!reqs[0])
1100 			return false;
1101 		ret = 1;
1102 	}
1103 
1104 	percpu_ref_get_many(&ctx->refs, ret);
1105 	for (i = 0; i < ret; i++) {
1106 		struct io_kiocb *req = reqs[i];
1107 
1108 		io_preinit_req(req, ctx);
1109 		io_req_add_to_cache(req, ctx);
1110 	}
1111 	return true;
1112 }
1113 
1114 static inline void io_dismantle_req(struct io_kiocb *req)
1115 {
1116 	unsigned int flags = req->flags;
1117 
1118 	if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
1119 		io_clean_op(req);
1120 	if (!(flags & REQ_F_FIXED_FILE))
1121 		io_put_file(req->file);
1122 }
1123 
1124 static __cold void io_free_req_tw(struct io_kiocb *req, struct io_tw_state *ts)
1125 {
1126 	struct io_ring_ctx *ctx = req->ctx;
1127 
1128 	if (req->rsrc_node) {
1129 		io_tw_lock(ctx, ts);
1130 		io_put_rsrc_node(ctx, req->rsrc_node);
1131 	}
1132 	io_dismantle_req(req);
1133 	io_put_task_remote(req->task, 1);
1134 
1135 	spin_lock(&ctx->completion_lock);
1136 	wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
1137 	ctx->locked_free_nr++;
1138 	spin_unlock(&ctx->completion_lock);
1139 }
1140 
1141 __cold void io_free_req(struct io_kiocb *req)
1142 {
1143 	req->io_task_work.func = io_free_req_tw;
1144 	io_req_task_work_add(req);
1145 }
1146 
1147 static void __io_req_find_next_prep(struct io_kiocb *req)
1148 {
1149 	struct io_ring_ctx *ctx = req->ctx;
1150 
1151 	spin_lock(&ctx->completion_lock);
1152 	io_disarm_next(req);
1153 	spin_unlock(&ctx->completion_lock);
1154 }
1155 
1156 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1157 {
1158 	struct io_kiocb *nxt;
1159 
1160 	/*
1161 	 * If LINK is set, we have dependent requests in this chain. If we
1162 	 * didn't fail this request, queue the first one up, moving any other
1163 	 * dependencies to the next request. In case of failure, fail the rest
1164 	 * of the chain.
1165 	 */
1166 	if (unlikely(req->flags & IO_DISARM_MASK))
1167 		__io_req_find_next_prep(req);
1168 	nxt = req->link;
1169 	req->link = NULL;
1170 	return nxt;
1171 }
1172 
1173 static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
1174 {
1175 	if (!ctx)
1176 		return;
1177 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1178 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1179 	if (ts->locked) {
1180 		io_submit_flush_completions(ctx);
1181 		mutex_unlock(&ctx->uring_lock);
1182 		ts->locked = false;
1183 	}
1184 	percpu_ref_put(&ctx->refs);
1185 }
1186 
1187 static unsigned int handle_tw_list(struct llist_node *node,
1188 				   struct io_ring_ctx **ctx,
1189 				   struct io_tw_state *ts,
1190 				   struct llist_node *last)
1191 {
1192 	unsigned int count = 0;
1193 
1194 	while (node && node != last) {
1195 		struct llist_node *next = node->next;
1196 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1197 						    io_task_work.node);
1198 
1199 		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1200 
1201 		if (req->ctx != *ctx) {
1202 			ctx_flush_and_put(*ctx, ts);
1203 			*ctx = req->ctx;
1204 			/* if not contended, grab the lock to improve batching */
1205 			ts->locked = mutex_trylock(&(*ctx)->uring_lock);
1206 			percpu_ref_get(&(*ctx)->refs);
1207 		}
1208 		req->io_task_work.func(req, ts);
1209 		node = next;
1210 		count++;
1211 		if (unlikely(need_resched())) {
1212 			ctx_flush_and_put(*ctx, ts);
1213 			*ctx = NULL;
1214 			cond_resched();
1215 		}
1216 	}
1217 
1218 	return count;
1219 }
1220 
1221 /**
1222  * io_llist_xchg - swap all entries in a lock-less list
1223  * @head:	the head of the lock-less list to delete all entries from
1224  * @new:	new entry as the head of the list
1225  *
1226  * If the list is empty, return NULL; otherwise, return the pointer to the first entry.
1227  * The order of entries returned is from the newest to the oldest added one.
1228  */
1229 static inline struct llist_node *io_llist_xchg(struct llist_head *head,
1230 					       struct llist_node *new)
1231 {
1232 	return xchg(&head->first, new);
1233 }
1234 
1235 /**
1236  * io_llist_cmpxchg - possibly swap all entries in a lock-less list
1237  * @head:	the head of lock-less list to delete all entries
1238  * @old:	expected old value of the first entry of the list
1239  * @new:	new entry as the head of the list
1240  *
1241  * Perform a cmpxchg on the first entry of the list.
1242  */
1243 
1244 static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
1245 						  struct llist_node *old,
1246 						  struct llist_node *new)
1247 {
1248 	return cmpxchg(&head->first, old, new);
1249 }
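
/*
 * tctx_task_work() below pairs these helpers: it xchg()s the list head
 * with a stack-local &fake node, runs every entry until it meets &fake,
 * then tries to cmpxchg &fake back to NULL. A failed cmpxchg means
 * another task queued more work after the xchg, so the loop runs again
 * rather than dropping those entries.
 */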
1250 
1251 void tctx_task_work(struct callback_head *cb)
1252 {
1253 	struct io_tw_state ts = {};
1254 	struct io_ring_ctx *ctx = NULL;
1255 	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1256 						  task_work);
1257 	struct llist_node fake = {};
1258 	struct llist_node *node;
1259 	unsigned int loops = 0;
1260 	unsigned int count = 0;
1261 
1262 	if (unlikely(current->flags & PF_EXITING)) {
1263 		io_fallback_tw(tctx);
1264 		return;
1265 	}
1266 
1267 	do {
1268 		loops++;
1269 		node = io_llist_xchg(&tctx->task_list, &fake);
1270 		count += handle_tw_list(node, &ctx, &ts, &fake);
1271 
1272 		/* skip expensive cmpxchg if there are items in the list */
1273 		if (READ_ONCE(tctx->task_list.first) != &fake)
1274 			continue;
1275 		if (ts.locked && !wq_list_empty(&ctx->submit_state.compl_reqs)) {
1276 			io_submit_flush_completions(ctx);
1277 			if (READ_ONCE(tctx->task_list.first) != &fake)
1278 				continue;
1279 		}
1280 		node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
1281 	} while (node != &fake);
1282 
1283 	ctx_flush_and_put(ctx, &ts);
1284 
1285 	/* relaxed read is enough as only the task itself sets ->in_cancel */
1286 	if (unlikely(atomic_read(&tctx->in_cancel)))
1287 		io_uring_drop_tctx_refs(current);
1288 
1289 	trace_io_uring_task_work_run(tctx, count, loops);
1290 }
1291 
1292 static __cold void io_fallback_tw(struct io_uring_task *tctx)
1293 {
1294 	struct llist_node *node = llist_del_all(&tctx->task_list);
1295 	struct io_kiocb *req;
1296 
1297 	while (node) {
1298 		req = container_of(node, struct io_kiocb, io_task_work.node);
1299 		node = node->next;
1300 		if (llist_add(&req->io_task_work.node,
1301 			      &req->ctx->fallback_llist))
1302 			schedule_delayed_work(&req->ctx->fallback_work, 1);
1303 	}
1304 }
1305 
1306 static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
1307 {
1308 	struct io_ring_ctx *ctx = req->ctx;
1309 	unsigned nr_wait, nr_tw, nr_tw_prev;
1310 	struct llist_node *first;
1311 
1312 	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
1313 		flags &= ~IOU_F_TWQ_LAZY_WAKE;
1314 
1315 	first = READ_ONCE(ctx->work_llist.first);
1316 	do {
1317 		nr_tw_prev = 0;
1318 		if (first) {
1319 			struct io_kiocb *first_req = container_of(first,
1320 							struct io_kiocb,
1321 							io_task_work.node);
1322 			/*
1323 			 * Might be executed at any moment, rely on
1324 			 * SLAB_TYPESAFE_BY_RCU to keep it alive.
1325 			 */
1326 			nr_tw_prev = READ_ONCE(first_req->nr_tw);
1327 		}
1328 		nr_tw = nr_tw_prev + 1;
1329 		/* Large enough to fail the nr_wait comparison below */
1330 		if (!(flags & IOU_F_TWQ_LAZY_WAKE))
1331 			nr_tw = -1U;
1332 
1333 		req->nr_tw = nr_tw;
1334 		req->io_task_work.node.next = first;
1335 	} while (!try_cmpxchg(&ctx->work_llist.first, &first,
1336 			      &req->io_task_work.node));
1337 
1338 	if (!first) {
1339 		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1340 			atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1341 		if (ctx->has_evfd)
1342 			io_eventfd_signal(ctx);
1343 	}
1344 
1345 	nr_wait = atomic_read(&ctx->cq_wait_nr);
1346 	/* no one is waiting */
1347 	if (!nr_wait)
1348 		return;
1349 	/* either not enough or the previous add has already woken it up */
1350 	if (nr_wait > nr_tw || nr_tw_prev >= nr_wait)
1351 		return;
1352 	/* pairs with set_current_state() in io_cqring_wait() */
1353 	smp_mb__after_atomic();
1354 	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
1355 }
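
/*
 * Lazy-wake example: a waiter needing two completions sets ctx->cq_wait_nr
 * to 2 before sleeping. The first IOU_F_TWQ_LAZY_WAKE add computes
 * nr_tw == 1 < 2 and skips the wakeup; the second computes nr_tw == 2 and
 * wakes the submitter task. A non-lazy add forces nr_tw to -1U, so only
 * the "previous add already woke it up" check can skip the wakeup.
 */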
1356 
1357 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
1358 {
1359 	struct io_uring_task *tctx = req->task->io_uring;
1360 	struct io_ring_ctx *ctx = req->ctx;
1361 
1362 	if (!(flags & IOU_F_TWQ_FORCE_NORMAL) &&
1363 	    (ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
1364 		rcu_read_lock();
1365 		io_req_local_work_add(req, flags);
1366 		rcu_read_unlock();
1367 		return;
1368 	}
1369 
1370 	/* task_work already pending, we're done */
1371 	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
1372 		return;
1373 
1374 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1375 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1376 
1377 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
1378 		return;
1379 
1380 	io_fallback_tw(tctx);
1381 }
1382 
1383 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
1384 {
1385 	struct llist_node *node;
1386 
1387 	node = llist_del_all(&ctx->work_llist);
1388 	while (node) {
1389 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1390 						    io_task_work.node);
1391 
1392 		node = node->next;
1393 		__io_req_task_work_add(req, IOU_F_TWQ_FORCE_NORMAL);
1394 	}
1395 }
1396 
1397 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts)
1398 {
1399 	struct llist_node *node;
1400 	unsigned int loops = 0;
1401 	int ret = 0;
1402 
1403 	if (WARN_ON_ONCE(ctx->submitter_task != current))
1404 		return -EEXIST;
1405 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1406 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1407 again:
1408 	node = io_llist_xchg(&ctx->work_llist, NULL);
1409 	while (node) {
1410 		struct llist_node *next = node->next;
1411 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1412 						    io_task_work.node);
1413 		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1414 		req->io_task_work.func(req, ts);
1415 		ret++;
1416 		node = next;
1417 	}
1418 	loops++;
1419 
1420 	if (!llist_empty(&ctx->work_llist))
1421 		goto again;
1422 	if (ts->locked) {
1423 		io_submit_flush_completions(ctx);
1424 		if (!llist_empty(&ctx->work_llist))
1425 			goto again;
1426 	}
1427 	trace_io_uring_local_work_run(ctx, ret, loops);
1428 	return ret;
1429 }
1430 
1431 static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
1432 {
1433 	struct io_tw_state ts = { .locked = true, };
1434 	int ret;
1435 
1436 	if (llist_empty(&ctx->work_llist))
1437 		return 0;
1438 
1439 	ret = __io_run_local_work(ctx, &ts);
1440 	/* shouldn't happen! */
1441 	if (WARN_ON_ONCE(!ts.locked))
1442 		mutex_lock(&ctx->uring_lock);
1443 	return ret;
1444 }
1445 
1446 static int io_run_local_work(struct io_ring_ctx *ctx)
1447 {
1448 	struct io_tw_state ts = {};
1449 	int ret;
1450 
1451 	ts.locked = mutex_trylock(&ctx->uring_lock);
1452 	ret = __io_run_local_work(ctx, &ts);
1453 	if (ts.locked)
1454 		mutex_unlock(&ctx->uring_lock);
1455 
1456 	return ret;
1457 }
1458 
1459 static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
1460 {
1461 	io_tw_lock(req->ctx, ts);
1462 	io_req_defer_failed(req, req->cqe.res);
1463 }
1464 
1465 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
1466 {
1467 	io_tw_lock(req->ctx, ts);
1468 	/* req->task == current here, checking PF_EXITING is safe */
1469 	if (unlikely(req->task->flags & PF_EXITING))
1470 		io_req_defer_failed(req, -EFAULT);
1471 	else if (req->flags & REQ_F_FORCE_ASYNC)
1472 		io_queue_iowq(req, ts);
1473 	else
1474 		io_queue_sqe(req);
1475 }
1476 
1477 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1478 {
1479 	io_req_set_res(req, ret, 0);
1480 	req->io_task_work.func = io_req_task_cancel;
1481 	io_req_task_work_add(req);
1482 }
1483 
1484 void io_req_task_queue(struct io_kiocb *req)
1485 {
1486 	req->io_task_work.func = io_req_task_submit;
1487 	io_req_task_work_add(req);
1488 }
1489 
1490 void io_queue_next(struct io_kiocb *req)
1491 {
1492 	struct io_kiocb *nxt = io_req_find_next(req);
1493 
1494 	if (nxt)
1495 		io_req_task_queue(nxt);
1496 }
1497 
1498 void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
1499 	__must_hold(&ctx->uring_lock)
1500 {
1501 	struct task_struct *task = NULL;
1502 	int task_refs = 0;
1503 
1504 	do {
1505 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1506 						    comp_list);
1507 
1508 		if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
1509 			if (req->flags & REQ_F_REFCOUNT) {
1510 				node = req->comp_list.next;
1511 				if (!req_ref_put_and_test(req))
1512 					continue;
1513 			}
1514 			if ((req->flags & REQ_F_POLLED) && req->apoll) {
1515 				struct async_poll *apoll = req->apoll;
1516 
1517 				if (apoll->double_poll)
1518 					kfree(apoll->double_poll);
1519 				if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
1520 					kfree(apoll);
1521 				req->flags &= ~REQ_F_POLLED;
1522 			}
1523 			if (req->flags & IO_REQ_LINK_FLAGS)
1524 				io_queue_next(req);
1525 			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1526 				io_clean_op(req);
1527 		}
1528 		if (!(req->flags & REQ_F_FIXED_FILE))
1529 			io_put_file(req->file);
1530 
1531 		io_req_put_rsrc_locked(req, ctx);
1532 
1533 		if (req->task != task) {
1534 			if (task)
1535 				io_put_task(task, task_refs);
1536 			task = req->task;
1537 			task_refs = 0;
1538 		}
1539 		task_refs++;
1540 		node = req->comp_list.next;
1541 		io_req_add_to_cache(req, ctx);
1542 	} while (node);
1543 
1544 	if (task)
1545 		io_put_task(task, task_refs);
1546 }
1547 
1548 static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
1549 	__must_hold(&ctx->uring_lock)
1550 {
1551 	struct io_submit_state *state = &ctx->submit_state;
1552 	struct io_wq_work_node *node;
1553 
1554 	__io_cq_lock(ctx);
1555 	/* must come first to preserve CQE ordering in failure cases */
1556 	if (state->cqes_count)
1557 		__io_flush_post_cqes(ctx);
1558 	__wq_list_for_each(node, &state->compl_reqs) {
1559 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1560 					    comp_list);
1561 
1562 		if (!(req->flags & REQ_F_CQE_SKIP) &&
1563 		    unlikely(!__io_fill_cqe_req(ctx, req))) {
1564 			if (ctx->task_complete) {
1565 				spin_lock(&ctx->completion_lock);
1566 				io_req_cqe_overflow(req);
1567 				spin_unlock(&ctx->completion_lock);
1568 			} else {
1569 				io_req_cqe_overflow(req);
1570 			}
1571 		}
1572 	}
1573 	__io_cq_unlock_post_flush(ctx);
1574 
1575 	if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
1576 		io_free_batch_list(ctx, state->compl_reqs.first);
1577 		INIT_WQ_LIST(&state->compl_reqs);
1578 	}
1579 }
1580 
1581 /*
1582  * Drop reference to request, return next in chain (if there is one) if this
1583  * was the last reference to this request.
1584  */
1585 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
1586 {
1587 	struct io_kiocb *nxt = NULL;
1588 
1589 	if (req_ref_put_and_test(req)) {
1590 		if (unlikely(req->flags & IO_REQ_LINK_FLAGS))
1591 			nxt = io_req_find_next(req);
1592 		io_free_req(req);
1593 	}
1594 	return nxt;
1595 }
1596 
1597 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
1598 {
1599 	/* See comment at the top of this file */
1600 	smp_rmb();
1601 	return __io_cqring_events(ctx);
1602 }
1603 
1604 /*
1605  * We can't just wait for polled events to come to us; we have to actively
1606  * find and complete them.
1607  */
1608 static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
1609 {
1610 	if (!(ctx->flags & IORING_SETUP_IOPOLL))
1611 		return;
1612 
1613 	mutex_lock(&ctx->uring_lock);
1614 	while (!wq_list_empty(&ctx->iopoll_list)) {
1615 		/* let it sleep and repeat later if can't complete a request */
1616 		if (io_do_iopoll(ctx, true) == 0)
1617 			break;
1618 		/*
1619 		 * Ensure we allow local-to-the-cpu processing to take place;
1620 		 * in this case we need to ensure that we reap all events.
1621 		 * Also let task_work, etc. progress by releasing the mutex.
1622 		 */
1623 		if (need_resched()) {
1624 			mutex_unlock(&ctx->uring_lock);
1625 			cond_resched();
1626 			mutex_lock(&ctx->uring_lock);
1627 		}
1628 	}
1629 	mutex_unlock(&ctx->uring_lock);
1630 }
1631 
1632 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
1633 {
1634 	unsigned int nr_events = 0;
1635 	int ret = 0;
1636 	unsigned long check_cq;
1637 
1638 	if (!io_allowed_run_tw(ctx))
1639 		return -EEXIST;
1640 
1641 	check_cq = READ_ONCE(ctx->check_cq);
1642 	if (unlikely(check_cq)) {
1643 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
1644 			__io_cqring_overflow_flush(ctx);
1645 		/*
1646 		 * Similarly do not spin if we have not informed the user of any
1647 		 * dropped CQE.
1648 		 */
1649 		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
1650 			return -EBADR;
1651 	}
1652 	/*
1653 	 * Don't enter poll loop if we already have events pending.
1654 	 * If we do, we can potentially be spinning for commands that
1655 	 * already triggered a CQE (e.g. in error).
1656 	 */
1657 	if (io_cqring_events(ctx))
1658 		return 0;
1659 
1660 	do {
1661 		/*
1662 		 * If a submit got punted to a workqueue, we can have the
1663 		 * application entering polling for a command before it gets
1664 		 * issued. That app will hold the uring_lock for the duration
1665 		 * of the poll right here, so we need to take a breather every
1666 		 * now and then to ensure that the issue has a chance to add
1667 		 * the poll to the issued list. Otherwise we can spin here
1668 		 * forever, while the workqueue is stuck trying to acquire the
1669 		 * very same mutex.
1670 		 */
1671 		if (wq_list_empty(&ctx->iopoll_list) ||
1672 		    io_task_work_pending(ctx)) {
1673 			u32 tail = ctx->cached_cq_tail;
1674 
1675 			(void) io_run_local_work_locked(ctx);
1676 
1677 			if (task_work_pending(current) ||
1678 			    wq_list_empty(&ctx->iopoll_list)) {
1679 				mutex_unlock(&ctx->uring_lock);
1680 				io_run_task_work();
1681 				mutex_lock(&ctx->uring_lock);
1682 			}
1683 			/* some requests don't go through iopoll_list */
1684 			if (tail != ctx->cached_cq_tail ||
1685 			    wq_list_empty(&ctx->iopoll_list))
1686 				break;
1687 		}
1688 		ret = io_do_iopoll(ctx, !min);
1689 		if (ret < 0)
1690 			break;
1691 		nr_events += ret;
1692 		ret = 0;
1693 	} while (nr_events < min && !need_resched());
1694 
1695 	return ret;
1696 }
1697 
1698 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
1699 {
1700 	if (ts->locked)
1701 		io_req_complete_defer(req);
1702 	else
1703 		io_req_complete_post(req, IO_URING_F_UNLOCKED);
1704 }
1705 
1706 /*
1707  * After the iocb has been issued, it's safe to be found on the poll list.
1708  * Adding the kiocb to the list AFTER submission ensures that we don't
1709  * find it from an io_do_iopoll() thread before the issuer is done
1710  * accessing the kiocb cookie.
1711  */
1712 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1713 {
1714 	struct io_ring_ctx *ctx = req->ctx;
1715 	const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
1716 
1717 	/* workqueue context doesn't hold uring_lock, grab it now */
1718 	if (unlikely(needs_lock))
1719 		mutex_lock(&ctx->uring_lock);
1720 
1721 	/*
1722 	 * Track whether we have multiple files in our lists. This will impact
1723 	 * how we do polling eventually, not spinning if we're on potentially
1724 	 * different devices.
1725 	 */
1726 	if (wq_list_empty(&ctx->iopoll_list)) {
1727 		ctx->poll_multi_queue = false;
1728 	} else if (!ctx->poll_multi_queue) {
1729 		struct io_kiocb *list_req;
1730 
1731 		list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
1732 					comp_list);
1733 		if (list_req->file != req->file)
1734 			ctx->poll_multi_queue = true;
1735 	}
1736 
1737 	/*
1738 	 * For fast devices, IO may have already completed. If it has, add
1739 	 * it to the front so we find it first.
1740 	 */
1741 	if (READ_ONCE(req->iopoll_completed))
1742 		wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
1743 	else
1744 		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
1745 
1746 	if (unlikely(needs_lock)) {
1747 		/*
1748 		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
1749 		 * in sq thread task context or in io worker task context. If
1750 		 * the current task context is the sq thread, we don't need to
1751 		 * check whether we should wake up the sq thread.
1752 		 */
1753 		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
1754 		    wq_has_sleeper(&ctx->sq_data->wait))
1755 			wake_up(&ctx->sq_data->wait);
1756 
1757 		mutex_unlock(&ctx->uring_lock);
1758 	}
1759 }
1760 
1761 static bool io_bdev_nowait(struct block_device *bdev)
1762 {
1763 	return !bdev || bdev_nowait(bdev);
1764 }
1765 
1766 /*
1767  * If we tracked the file through the SCM inflight mechanism, we could support
1768  * any file. For now, just ensure that anything potentially problematic is done
1769  * inline.
1770  */
1771 static bool __io_file_supports_nowait(struct file *file, umode_t mode)
1772 {
1773 	if (S_ISBLK(mode)) {
1774 		if (IS_ENABLED(CONFIG_BLOCK) &&
1775 		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
1776 			return true;
1777 		return false;
1778 	}
1779 	if (S_ISSOCK(mode))
1780 		return true;
1781 	if (S_ISREG(mode)) {
1782 		if (IS_ENABLED(CONFIG_BLOCK) &&
1783 		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
1784 		    !io_is_uring_fops(file))
1785 			return true;
1786 		return false;
1787 	}
1788 
1789 	/* any ->read/write should understand O_NONBLOCK */
1790 	if (file->f_flags & O_NONBLOCK)
1791 		return true;
1792 	return file->f_mode & FMODE_NOWAIT;
1793 }
1794 
1795 /*
1796  * Compute the FFS_* flags for the file: whether it is a regular file and
1797  * whether it supports non-blocking IO. Callers cache the result instead
1798  * of re-deriving it from the file on every request.
1799  */
1800 unsigned int io_file_get_flags(struct file *file)
1801 {
1802 	umode_t mode = file_inode(file)->i_mode;
1803 	unsigned int res = 0;
1804 
1805 	if (S_ISREG(mode))
1806 		res |= FFS_ISREG;
1807 	if (__io_file_supports_nowait(file, mode))
1808 		res |= FFS_NOWAIT;
1809 	return res;
1810 }
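
/*
 * Callers fold these bits straight into req->flags, as in
 * io_prep_async_work() above:
 *
 *	req->flags |= io_file_get_flags(req->file) << REQ_F_SUPPORT_NOWAIT_BIT;
 *
 * which relies on FFS_NOWAIT/FFS_ISREG lining up with
 * REQ_F_SUPPORT_NOWAIT/REQ_F_ISREG after the shift.
 */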
1811 
1812 bool io_alloc_async_data(struct io_kiocb *req)
1813 {
1814 	WARN_ON_ONCE(!io_cold_defs[req->opcode].async_size);
1815 	req->async_data = kmalloc(io_cold_defs[req->opcode].async_size, GFP_KERNEL);
1816 	if (req->async_data) {
1817 		req->flags |= REQ_F_ASYNC_DATA;
1818 		return false;
1819 	}
1820 	return true;
1821 }
1822 
1823 int io_req_prep_async(struct io_kiocb *req)
1824 {
1825 	const struct io_cold_def *cdef = &io_cold_defs[req->opcode];
1826 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1827 
1828 	/* assign early for deferred execution for non-fixed file */
1829 	if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file)
1830 		req->file = io_file_get_normal(req, req->cqe.fd);
1831 	if (!cdef->prep_async)
1832 		return 0;
1833 	if (WARN_ON_ONCE(req_has_async_data(req)))
1834 		return -EFAULT;
1835 	if (!def->manual_alloc) {
1836 		if (io_alloc_async_data(req))
1837 			return -EAGAIN;
1838 	}
1839 	return cdef->prep_async(req);
1840 }
1841 
1842 static u32 io_get_sequence(struct io_kiocb *req)
1843 {
1844 	u32 seq = req->ctx->cached_sq_head;
1845 	struct io_kiocb *cur;
1846 
1847 	/* need original cached_sq_head, but it was increased for each req */
1848 	io_for_each_link(cur, req)
1849 		seq--;
1850 	return seq;
1851 }
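
/*
 * Worked example (illustrative): if cached_sq_head is 10 after consuming a
 * three-request link (head plus two linked reqs), io_for_each_link() visits
 * all three and the head's sequence comes out as 10 - 3 = 7, i.e. the value
 * cached_sq_head held before the link was fetched.
 */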
1852 
1853 static __cold void io_drain_req(struct io_kiocb *req)
1854 	__must_hold(&ctx->uring_lock)
1855 {
1856 	struct io_ring_ctx *ctx = req->ctx;
1857 	struct io_defer_entry *de;
1858 	int ret;
1859 	u32 seq = io_get_sequence(req);
1860 
1861 	/* Still need to defer if there are pending reqs in the defer list. */
1862 	spin_lock(&ctx->completion_lock);
1863 	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
1864 		spin_unlock(&ctx->completion_lock);
1865 queue:
1866 		ctx->drain_active = false;
1867 		io_req_task_queue(req);
1868 		return;
1869 	}
1870 	spin_unlock(&ctx->completion_lock);
1871 
1872 	io_prep_async_link(req);
1873 	de = kmalloc(sizeof(*de), GFP_KERNEL);
1874 	if (!de) {
1875 		ret = -ENOMEM;
1876 		io_req_defer_failed(req, ret);
1877 		return;
1878 	}
1879 
1880 	spin_lock(&ctx->completion_lock);
1881 	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
1882 		spin_unlock(&ctx->completion_lock);
1883 		kfree(de);
1884 		goto queue;
1885 	}
1886 
1887 	trace_io_uring_defer(req);
1888 	de->req = req;
1889 	de->seq = seq;
1890 	list_add_tail(&de->list, &ctx->defer_list);
1891 	spin_unlock(&ctx->completion_lock);
1892 }
1893 
1894 static void io_clean_op(struct io_kiocb *req)
1895 {
1896 	if (req->flags & REQ_F_BUFFER_SELECTED) {
1897 		spin_lock(&req->ctx->completion_lock);
1898 		io_put_kbuf_comp(req);
1899 		spin_unlock(&req->ctx->completion_lock);
1900 	}
1901 
1902 	if (req->flags & REQ_F_NEED_CLEANUP) {
1903 		const struct io_cold_def *def = &io_cold_defs[req->opcode];
1904 
1905 		if (def->cleanup)
1906 			def->cleanup(req);
1907 	}
1908 	if ((req->flags & REQ_F_POLLED) && req->apoll) {
1909 		kfree(req->apoll->double_poll);
1910 		kfree(req->apoll);
1911 		req->apoll = NULL;
1912 	}
1913 	if (req->flags & REQ_F_INFLIGHT) {
1914 		struct io_uring_task *tctx = req->task->io_uring;
1915 
1916 		atomic_dec(&tctx->inflight_tracked);
1917 	}
1918 	if (req->flags & REQ_F_CREDS)
1919 		put_cred(req->creds);
1920 	if (req->flags & REQ_F_ASYNC_DATA) {
1921 		kfree(req->async_data);
1922 		req->async_data = NULL;
1923 	}
1924 	req->flags &= ~IO_REQ_CLEAN_FLAGS;
1925 }
1926 
1927 static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
1928 			   unsigned int issue_flags)
1929 {
1930 	if (req->file || !def->needs_file)
1931 		return true;
1932 
1933 	if (req->flags & REQ_F_FIXED_FILE)
1934 		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
1935 	else
1936 		req->file = io_file_get_normal(req, req->cqe.fd);
1937 
1938 	return !!req->file;
1939 }
1940 
1941 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1942 {
1943 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1944 	const struct cred *creds = NULL;
1945 	int ret;
1946 
1947 	if (unlikely(!io_assign_file(req, def, issue_flags)))
1948 		return -EBADF;
1949 
1950 	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
1951 		creds = override_creds(req->creds);
1952 
1953 	if (!def->audit_skip)
1954 		audit_uring_entry(req->opcode);
1955 
1956 	ret = def->issue(req, issue_flags);
1957 
1958 	if (!def->audit_skip)
1959 		audit_uring_exit(!ret, ret);
1960 
1961 	if (creds)
1962 		revert_creds(creds);
1963 
1964 	if (ret == IOU_OK) {
1965 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1966 			io_req_complete_defer(req);
1967 		else
1968 			io_req_complete_post(req, issue_flags);
1969 	} else if (ret != IOU_ISSUE_SKIP_COMPLETE)
1970 		return ret;
1971 
1972 	/* If the op doesn't have a file, we're not polling for it */
1973 	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1974 		io_iopoll_req_issued(req, issue_flags);
1975 
1976 	return 0;
1977 }
1978 
1979 int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
1980 {
1981 	io_tw_lock(req->ctx, ts);
1982 	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
1983 				 IO_URING_F_COMPLETE_DEFER);
1984 }
1985 
1986 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
1987 {
1988 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1989 
1990 	req = io_put_req_find_next(req);
1991 	return req ? &req->work : NULL;
1992 }
1993 
1994 void io_wq_submit_work(struct io_wq_work *work)
1995 {
1996 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1997 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1998 	unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
1999 	bool needs_poll = false;
2000 	int ret = 0, err = -ECANCELED;
2001 
2002 	/* one will be dropped by ->io_wq_free_work() after returning to io-wq */
2003 	if (!(req->flags & REQ_F_REFCOUNT))
2004 		__io_req_set_refcount(req, 2);
2005 	else
2006 		req_ref_get(req);
2007 
2008 	io_arm_ltimeout(req);
2009 
2010 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
2011 	if (work->flags & IO_WQ_WORK_CANCEL) {
2012 fail:
2013 		io_req_task_queue_fail(req, err);
2014 		return;
2015 	}
2016 	if (!io_assign_file(req, def, issue_flags)) {
2017 		err = -EBADF;
2018 		work->flags |= IO_WQ_WORK_CANCEL;
2019 		goto fail;
2020 	}
2021 
2022 	if (req->flags & REQ_F_FORCE_ASYNC) {
2023 		bool opcode_poll = def->pollin || def->pollout;
2024 
2025 		if (opcode_poll && file_can_poll(req->file)) {
2026 			needs_poll = true;
2027 			issue_flags |= IO_URING_F_NONBLOCK;
2028 		}
2029 	}
2030 
2031 	do {
2032 		ret = io_issue_sqe(req, issue_flags);
2033 		if (ret != -EAGAIN)
2034 			break;
2035 		/*
2036 		 * We can get EAGAIN for iopolled IO even though we're
2037 		 * forcing a sync submission from here, since we can't
2038 		 * wait for request slots on the block side.
2039 		 */
2040 		if (!needs_poll) {
2041 			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
2042 				break;
2043 			cond_resched();
2044 			continue;
2045 		}
2046 
2047 		if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
2048 			return;
2049 		/* aborted or ready, in either case retry blocking */
2050 		needs_poll = false;
2051 		issue_flags &= ~IO_URING_F_NONBLOCK;
2052 	} while (1);
2053 
2054 	/* avoid locking problems by failing it from a clean context */
2055 	if (ret < 0)
2056 		io_req_task_queue_fail(req, ret);
2057 }
2058 
2059 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
2060 				      unsigned int issue_flags)
2061 {
2062 	struct io_ring_ctx *ctx = req->ctx;
2063 	struct file *file = NULL;
2064 	unsigned long file_ptr;
2065 
2066 	io_ring_submit_lock(ctx, issue_flags);
2067 
2068 	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
2069 		goto out;
2070 	fd = array_index_nospec(fd, ctx->nr_user_files);
2071 	file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
2072 	file = (struct file *) (file_ptr & FFS_MASK);
2073 	file_ptr &= ~FFS_MASK;
2074 	/* mask in overlapping REQ_F and FFS bits */
2075 	req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
2076 	io_req_set_rsrc_node(req, ctx, 0);
2077 out:
2078 	io_ring_submit_unlock(ctx, issue_flags);
2079 	return file;
2080 }
2081 
2082 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
2083 {
2084 	struct file *file = fget(fd);
2085 
2086 	trace_io_uring_file_get(req, fd);
2087 
2088 	/* we don't allow fixed io_uring files */
2089 	if (file && io_is_uring_fops(file))
2090 		io_req_track_inflight(req);
2091 	return file;
2092 }
2093 
2094 static void io_queue_async(struct io_kiocb *req, int ret)
2095 	__must_hold(&req->ctx->uring_lock)
2096 {
2097 	struct io_kiocb *linked_timeout;
2098 
2099 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
2100 		io_req_defer_failed(req, ret);
2101 		return;
2102 	}
2103 
2104 	linked_timeout = io_prep_linked_timeout(req);
2105 
2106 	switch (io_arm_poll_handler(req, 0)) {
2107 	case IO_APOLL_READY:
2108 		io_kbuf_recycle(req, 0);
2109 		io_req_task_queue(req);
2110 		break;
2111 	case IO_APOLL_ABORTED:
2112 		io_kbuf_recycle(req, 0);
2113 		io_queue_iowq(req, NULL);
2114 		break;
2115 	case IO_APOLL_OK:
2116 		break;
2117 	}
2118 
2119 	if (linked_timeout)
2120 		io_queue_linked_timeout(linked_timeout);
2121 }
2122 
2123 static inline void io_queue_sqe(struct io_kiocb *req)
2124 	__must_hold(&req->ctx->uring_lock)
2125 {
2126 	int ret;
2127 
2128 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
2129 
2130 	/*
2131 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
2132 	 * doesn't support non-blocking read/write attempts
2133 	 */
2134 	if (likely(!ret))
2135 		io_arm_ltimeout(req);
2136 	else
2137 		io_queue_async(req, ret);
2138 }
2139 
2140 static void io_queue_sqe_fallback(struct io_kiocb *req)
2141 	__must_hold(&req->ctx->uring_lock)
2142 {
2143 	if (unlikely(req->flags & REQ_F_FAIL)) {
2144 		/*
2145 		 * We won't submit; fail the whole chain. To do that, replace
2146 		 * hardlinks with normal links. An extra REQ_F_LINK is tolerated.
2147 		 */
2148 		req->flags &= ~REQ_F_HARDLINK;
2149 		req->flags |= REQ_F_LINK;
2150 		io_req_defer_failed(req, req->cqe.res);
2151 	} else {
2152 		int ret = io_req_prep_async(req);
2153 
2154 		if (unlikely(ret)) {
2155 			io_req_defer_failed(req, ret);
2156 			return;
2157 		}
2158 
2159 		if (unlikely(req->ctx->drain_active))
2160 			io_drain_req(req);
2161 		else
2162 			io_queue_iowq(req, NULL);
2163 	}
2164 }
2165 
2166 /*
2167  * Check SQE restrictions (opcode and flags).
2168  *
2169  * Returns 'true' if SQE is allowed, 'false' otherwise.
2170  */
2171 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
2172 					struct io_kiocb *req,
2173 					unsigned int sqe_flags)
2174 {
2175 	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
2176 		return false;
2177 
2178 	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
2179 	    ctx->restrictions.sqe_flags_required)
2180 		return false;
2181 
2182 	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
2183 			  ctx->restrictions.sqe_flags_required))
2184 		return false;
2185 
2186 	return true;
2187 }
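
/*
 * Example (illustrative): with sqe_flags_required = IOSQE_FIXED_FILE and
 * sqe_flags_allowed = IOSQE_IO_LINK registered, an SQE carrying
 * IOSQE_FIXED_FILE | IOSQE_IO_LINK is allowed, one missing IOSQE_FIXED_FILE
 * fails the "required" check, and one adding, say, IOSQE_ASYNC fails the
 * "allowed" check.
 */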
2188 
2189 static void io_init_req_drain(struct io_kiocb *req)
2190 {
2191 	struct io_ring_ctx *ctx = req->ctx;
2192 	struct io_kiocb *head = ctx->submit_state.link.head;
2193 
2194 	ctx->drain_active = true;
2195 	if (head) {
2196 		/*
2197 		 * If we need to drain a request in the middle of a link, drain
2198 		 * the head request and the next request/link after the current
2199 		 * link. Considering sequential execution of links,
2200 		 * REQ_F_IO_DRAIN will be maintained for every request of our
2201 		 * link.
2202 		 */
2203 		head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2204 		ctx->drain_next = true;
2205 	}
2206 }
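
/*
 * Example (illustrative): for a link A -> B -> C where B carries
 * IOSQE_IO_DRAIN, the head A gets REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC here
 * and drain_next is set, so the first request submitted after this link is
 * drained as well; sequential execution of the link then preserves the
 * drain ordering for B and C.
 */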
2207 
2208 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
2209 		       const struct io_uring_sqe *sqe)
2210 	__must_hold(&ctx->uring_lock)
2211 {
2212 	const struct io_issue_def *def;
2213 	unsigned int sqe_flags;
2214 	int personality;
2215 	u8 opcode;
2216 
2217 	/* req is partially pre-initialised, see io_preinit_req() */
2218 	req->opcode = opcode = READ_ONCE(sqe->opcode);
2219 	/* same numerical values as the corresponding REQ_F_*, safe to copy */
2220 	req->flags = sqe_flags = READ_ONCE(sqe->flags);
2221 	req->cqe.user_data = READ_ONCE(sqe->user_data);
2222 	req->file = NULL;
2223 	req->rsrc_node = NULL;
2224 	req->task = current;
2225 
2226 	if (unlikely(opcode >= IORING_OP_LAST)) {
2227 		req->opcode = 0;
2228 		return -EINVAL;
2229 	}
2230 	def = &io_issue_defs[opcode];
2231 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
2232 		/* enforce forwards compatibility on users */
2233 		if (sqe_flags & ~SQE_VALID_FLAGS)
2234 			return -EINVAL;
2235 		if (sqe_flags & IOSQE_BUFFER_SELECT) {
2236 			if (!def->buffer_select)
2237 				return -EOPNOTSUPP;
2238 			req->buf_index = READ_ONCE(sqe->buf_group);
2239 		}
2240 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
2241 			ctx->drain_disabled = true;
2242 		if (sqe_flags & IOSQE_IO_DRAIN) {
2243 			if (ctx->drain_disabled)
2244 				return -EOPNOTSUPP;
2245 			io_init_req_drain(req);
2246 		}
2247 	}
2248 	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
2249 		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
2250 			return -EACCES;
2251 		/* knock it to the slow queue path, will be drained there */
2252 		if (ctx->drain_active)
2253 			req->flags |= REQ_F_FORCE_ASYNC;
2254 		/* if there is no link, we're at "next" request and need to drain */
2255 		if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
2256 			ctx->drain_next = false;
2257 			ctx->drain_active = true;
2258 			req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2259 		}
2260 	}
2261 
2262 	if (!def->ioprio && sqe->ioprio)
2263 		return -EINVAL;
2264 	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
2265 		return -EINVAL;
2266 
2267 	if (def->needs_file) {
2268 		struct io_submit_state *state = &ctx->submit_state;
2269 
2270 		req->cqe.fd = READ_ONCE(sqe->fd);
2271 
2272 		/*
2273 		 * Plug now if we have more than 2 IOs left after this, and the
2274 		 * target is potentially a read/write to block-based storage.
2275 		 */
2276 		if (state->need_plug && def->plug) {
2277 			state->plug_started = true;
2278 			state->need_plug = false;
2279 			blk_start_plug_nr_ios(&state->plug, state->submit_nr);
2280 		}
2281 	}
2282 
2283 	personality = READ_ONCE(sqe->personality);
2284 	if (personality) {
2285 		int ret;
2286 
2287 		req->creds = xa_load(&ctx->personalities, personality);
2288 		if (!req->creds)
2289 			return -EINVAL;
2290 		get_cred(req->creds);
2291 		ret = security_uring_override_creds(req->creds);
2292 		if (ret) {
2293 			put_cred(req->creds);
2294 			return ret;
2295 		}
2296 		req->flags |= REQ_F_CREDS;
2297 	}
2298 
2299 	return def->prep(req, sqe);
2300 }
2301 
2302 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
2303 				      struct io_kiocb *req, int ret)
2304 {
2305 	struct io_ring_ctx *ctx = req->ctx;
2306 	struct io_submit_link *link = &ctx->submit_state.link;
2307 	struct io_kiocb *head = link->head;
2308 
2309 	trace_io_uring_req_failed(sqe, req, ret);
2310 
2311 	/*
2312 	 * Avoid breaking links in the middle as it renders links with SQPOLL
2313 	 * unusable. Instead of failing eagerly, continue assembling the link if
2314 	 * applicable and mark the head with REQ_F_FAIL. The link flushing code
2315 	 * should find the flag and handle the rest.
2316 	 */
2317 	req_fail_link_node(req, ret);
2318 	if (head && !(head->flags & REQ_F_FAIL))
2319 		req_fail_link_node(head, -ECANCELED);
2320 
2321 	if (!(req->flags & IO_REQ_LINK_FLAGS)) {
2322 		if (head) {
2323 			link->last->link = req;
2324 			link->head = NULL;
2325 			req = head;
2326 		}
2327 		io_queue_sqe_fallback(req);
2328 		return ret;
2329 	}
2330 
2331 	if (head)
2332 		link->last->link = req;
2333 	else
2334 		link->head = req;
2335 	link->last = req;
2336 	return 0;
2337 }
2338 
2339 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2340 			 const struct io_uring_sqe *sqe)
2341 	__must_hold(&ctx->uring_lock)
2342 {
2343 	struct io_submit_link *link = &ctx->submit_state.link;
2344 	int ret;
2345 
2346 	ret = io_init_req(ctx, req, sqe);
2347 	if (unlikely(ret))
2348 		return io_submit_fail_init(sqe, req, ret);
2349 
2350 	trace_io_uring_submit_req(req);
2351 
2352 	/*
2353 	 * If we already have a head request, queue this one for async
2354 	 * submittal once the head completes. If we don't have a head but
2355 	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2356 	 * submitted sync once the chain is complete. If none of those
2357 	 * conditions are true (normal request), then just queue it.
2358 	 */
2359 	if (unlikely(link->head)) {
2360 		ret = io_req_prep_async(req);
2361 		if (unlikely(ret))
2362 			return io_submit_fail_init(sqe, req, ret);
2363 
2364 		trace_io_uring_link(req, link->head);
2365 		link->last->link = req;
2366 		link->last = req;
2367 
2368 		if (req->flags & IO_REQ_LINK_FLAGS)
2369 			return 0;
2370 		/* last request of the link, flush it */
2371 		req = link->head;
2372 		link->head = NULL;
2373 		if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
2374 			goto fallback;
2375 
2376 	} else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
2377 					  REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
2378 		if (req->flags & IO_REQ_LINK_FLAGS) {
2379 			link->head = req;
2380 			link->last = req;
2381 		} else {
2382 fallback:
2383 			io_queue_sqe_fallback(req);
2384 		}
2385 		return 0;
2386 	}
2387 
2388 	io_queue_sqe(req);
2389 	return 0;
2390 }
2391 
2392 /*
2393  * Batched submission is done, ensure local IO is flushed out.
2394  */
2395 static void io_submit_state_end(struct io_ring_ctx *ctx)
2396 {
2397 	struct io_submit_state *state = &ctx->submit_state;
2398 
2399 	if (unlikely(state->link.head))
2400 		io_queue_sqe_fallback(state->link.head);
2401 	/* flush only after queuing links as they can generate completions */
2402 	io_submit_flush_completions(ctx);
2403 	if (state->plug_started)
2404 		blk_finish_plug(&state->plug);
2405 }
2406 
2407 /*
2408  * Start submission side cache.
2409  */
2410 static void io_submit_state_start(struct io_submit_state *state,
2411 				  unsigned int max_ios)
2412 {
2413 	state->plug_started = false;
2414 	state->need_plug = max_ios > 2;
2415 	state->submit_nr = max_ios;
2416 	/* set only head, no need to init link_last in advance */
2417 	state->link.head = NULL;
2418 }
2419 
2420 static void io_commit_sqring(struct io_ring_ctx *ctx)
2421 {
2422 	struct io_rings *rings = ctx->rings;
2423 
2424 	/*
2425 	 * Ensure any loads from the SQEs are done at this point,
2426 	 * since once we write the new head, the application could
2427 	 * write new data to them.
2428 	 */
2429 	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2430 }
2431 
2432 /*
2433  * Fetch an sqe, if one is available. Note this returns a pointer to memory
2434  * that is mapped by userspace. This means that care needs to be taken to
2435  * ensure that reads are stable, as we cannot rely on userspace always
2436  * being a good citizen. If members of the sqe are validated and then later
2437  * used, it's important that those reads are done through READ_ONCE() to
2438  * prevent a re-load down the line.
2439  */
2440 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
2441 {
2442 	unsigned head, mask = ctx->sq_entries - 1;
2443 	unsigned sq_idx = ctx->cached_sq_head++ & mask;
2444 
2445 	/*
2446 	 * The cached sq head (or cq tail) serves two purposes:
2447 	 *
2448 	 * 1) allows us to batch the cost of updating the user-visible
2449 	 *    head.
2450 	 * 2) allows the kernel side to track the head on its own, even
2451 	 *    though the application is the one updating it.
2452 	 */
2453 	head = READ_ONCE(ctx->sq_array[sq_idx]);
2454 	if (likely(head < ctx->sq_entries)) {
2455 		/* double index for 128-byte SQEs, twice as long */
2456 		if (ctx->flags & IORING_SETUP_SQE128)
2457 			head <<= 1;
2458 		*sqe = &ctx->sq_sqes[head];
2459 		return true;
2460 	}
2461 
2462 	/* drop invalid entries */
2463 	ctx->cq_extra--;
2464 	WRITE_ONCE(ctx->rings->sq_dropped,
2465 		   READ_ONCE(ctx->rings->sq_dropped) + 1);
2466 	return false;
2467 }
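
/*
 * Indexing example (illustrative): with sq_entries = 8, a raw sq_array[]
 * value of 3 selects sq_sqes[3] for normal 64-byte SQEs; under
 * IORING_SETUP_SQE128 it is doubled to select sq_sqes[6], as each 128-byte
 * SQE spans two slots of the normally-sized array.
 */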
2468 
2469 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2470 	__must_hold(&ctx->uring_lock)
2471 {
2472 	unsigned int entries = io_sqring_entries(ctx);
2473 	unsigned int left;
2474 	int ret;
2475 
2476 	if (unlikely(!entries))
2477 		return 0;
2478 	/* make sure SQ entry isn't read before tail */
2479 	ret = left = min(nr, entries);
2480 	io_get_task_refs(left);
2481 	io_submit_state_start(&ctx->submit_state, left);
2482 
2483 	do {
2484 		const struct io_uring_sqe *sqe;
2485 		struct io_kiocb *req;
2486 
2487 		if (unlikely(!io_alloc_req(ctx, &req)))
2488 			break;
2489 		if (unlikely(!io_get_sqe(ctx, &sqe))) {
2490 			io_req_add_to_cache(req, ctx);
2491 			break;
2492 		}
2493 
2494 		/*
2495 		 * Continue submitting even for sqe failure if the
2496 		 * ring was set up with IORING_SETUP_SUBMIT_ALL
2497 		 */
2498 		if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
2499 		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
2500 			left--;
2501 			break;
2502 		}
2503 	} while (--left);
2504 
2505 	if (unlikely(left)) {
2506 		ret -= left;
2507 		/* try again if it submitted nothing and can't allocate a req */
2508 		if (!ret && io_req_cache_empty(ctx))
2509 			ret = -EAGAIN;
2510 		current->io_uring->cached_refs += left;
2511 	}
2512 
2513 	io_submit_state_end(ctx);
2514 	/* Commit SQ ring head once we've consumed and submitted all SQEs */
2515 	io_commit_sqring(ctx);
2516 	return ret;
2517 }
2518 
2519 struct io_wait_queue {
2520 	struct wait_queue_entry wq;
2521 	struct io_ring_ctx *ctx;
2522 	unsigned cq_tail;
2523 	unsigned nr_timeouts;
2524 	ktime_t timeout;
2525 };
2526 
2527 static inline bool io_has_work(struct io_ring_ctx *ctx)
2528 {
2529 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
2530 	       !llist_empty(&ctx->work_llist);
2531 }
2532 
2533 static inline bool io_should_wake(struct io_wait_queue *iowq)
2534 {
2535 	struct io_ring_ctx *ctx = iowq->ctx;
2536 	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
2537 
2538 	/*
2539 	 * Wake up if we have enough events, or if a timeout occurred since we
2540 	 * started waiting. For timeouts, we always want to return to userspace,
2541 	 * regardless of event count.
2542 	 */
2543 	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2544 }
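
/*
 * Wraparound example (illustrative): with the CQ head at 0xffffffff and
 * min_events = 2, iowq->cq_tail wraps to 0x1. While the ring tail is still
 * 0xffffffff, dist = 0xffffffff - 0x1 = -2 as an int, so we keep waiting;
 * after two completions the tail wraps to 0x1 and dist reaches 0. The
 * signed subtraction is what keeps the comparison correct across the u32
 * wrap.
 */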
2545 
2546 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2547 			    int wake_flags, void *key)
2548 {
2549 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq);
2550 
2551 	/*
2552 	 * We cannot safely flush overflowed CQEs from here; just ensure we
2553 	 * wake up the task, and the next invocation will do the flush.
2554 	 */
2555 	if (io_should_wake(iowq) || io_has_work(iowq->ctx))
2556 		return autoremove_wake_function(curr, mode, wake_flags, key);
2557 	return -1;
2558 }
2559 
2560 int io_run_task_work_sig(struct io_ring_ctx *ctx)
2561 {
2562 	if (!llist_empty(&ctx->work_llist)) {
2563 		__set_current_state(TASK_RUNNING);
2564 		if (io_run_local_work(ctx) > 0)
2565 			return 1;
2566 	}
2567 	if (io_run_task_work() > 0)
2568 		return 1;
2569 	if (task_sigpending(current))
2570 		return -EINTR;
2571 	return 0;
2572 }
2573 
2574 /* when this returns > 0, the caller should retry */
2575 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2576 					  struct io_wait_queue *iowq)
2577 {
2578 	if (unlikely(READ_ONCE(ctx->check_cq)))
2579 		return 1;
2580 	if (unlikely(!llist_empty(&ctx->work_llist)))
2581 		return 1;
2582 	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL)))
2583 		return 1;
2584 	if (unlikely(task_sigpending(current)))
2585 		return -EINTR;
2586 	if (unlikely(io_should_wake(iowq)))
2587 		return 0;
2588 	if (iowq->timeout == KTIME_MAX)
2589 		schedule();
2590 	else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
2591 		return -ETIME;
2592 	return 0;
2593 }
2594 
2595 /*
2596  * Wait until events become available, if we don't already have some. The
2597  * application must reap them itself, as they reside on the shared cq ring.
2598  */
2599 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2600 			  const sigset_t __user *sig, size_t sigsz,
2601 			  struct __kernel_timespec __user *uts)
2602 {
2603 	struct io_wait_queue iowq;
2604 	struct io_rings *rings = ctx->rings;
2605 	int ret;
2606 
2607 	if (!io_allowed_run_tw(ctx))
2608 		return -EEXIST;
2609 	if (!llist_empty(&ctx->work_llist))
2610 		io_run_local_work(ctx);
2611 	io_run_task_work();
2612 	io_cqring_overflow_flush(ctx);
2613 	/* if user messes with these they will just get an early return */
2614 	if (__io_cqring_events_user(ctx) >= min_events)
2615 		return 0;
2616 
2617 	if (sig) {
2618 #ifdef CONFIG_COMPAT
2619 		if (in_compat_syscall())
2620 			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
2621 						      sigsz);
2622 		else
2623 #endif
2624 			ret = set_user_sigmask(sig, sigsz);
2625 
2626 		if (ret)
2627 			return ret;
2628 	}
2629 
2630 	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
2631 	iowq.wq.private = current;
2632 	INIT_LIST_HEAD(&iowq.wq.entry);
2633 	iowq.ctx = ctx;
2634 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2635 	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2636 	iowq.timeout = KTIME_MAX;
2637 
2638 	if (uts) {
2639 		struct timespec64 ts;
2640 
2641 		if (get_timespec64(&ts, uts))
2642 			return -EFAULT;
2643 		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
2644 	}
2645 
2646 	trace_io_uring_cqring_wait(ctx, min_events);
2647 	do {
2648 		unsigned long check_cq;
2649 
2650 		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
2651 			int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
2652 
2653 			atomic_set(&ctx->cq_wait_nr, nr_wait);
2654 			set_current_state(TASK_INTERRUPTIBLE);
2655 		} else {
2656 			prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
2657 							TASK_INTERRUPTIBLE);
2658 		}
2659 
2660 		ret = io_cqring_wait_schedule(ctx, &iowq);
2661 		__set_current_state(TASK_RUNNING);
2662 		atomic_set(&ctx->cq_wait_nr, 0);
2663 
2664 		if (ret < 0)
2665 			break;
2666 		/*
2667 		 * Run task_work after scheduling and before io_should_wake().
2668 		 * If we got woken because of task_work being processed, run it
2669 		 * now rather than let the caller do another wait loop.
2670 		 */
2671 		io_run_task_work();
2672 		if (!llist_empty(&ctx->work_llist))
2673 			io_run_local_work(ctx);
2674 
2675 		check_cq = READ_ONCE(ctx->check_cq);
2676 		if (unlikely(check_cq)) {
2677 			/* let the caller flush overflows, retry */
2678 			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
2679 				io_cqring_do_overflow_flush(ctx);
2680 			if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
2681 				ret = -EBADR;
2682 				break;
2683 			}
2684 		}
2685 
2686 		if (io_should_wake(&iowq)) {
2687 			ret = 0;
2688 			break;
2689 		}
2690 		cond_resched();
2691 	} while (1);
2692 
2693 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
2694 		finish_wait(&ctx->cq_wait, &iowq.wq);
2695 	restore_saved_sigmask_unless(ret == -EINTR);
2696 
2697 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2698 }
2699 
2700 static void io_mem_free(void *ptr)
2701 {
2702 	struct page *page;
2703 
2704 	if (!ptr)
2705 		return;
2706 
2707 	page = virt_to_head_page(ptr);
2708 	if (put_page_testzero(page))
2709 		free_compound_page(page);
2710 }
2711 
2712 static void *io_mem_alloc(size_t size)
2713 {
2714 	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
2715 
2716 	return (void *) __get_free_pages(gfp, get_order(size));
2717 }
2718 
2719 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
2720 				unsigned int cq_entries, size_t *sq_offset)
2721 {
2722 	struct io_rings *rings;
2723 	size_t off, sq_array_size;
2724 
2725 	off = struct_size(rings, cqes, cq_entries);
2726 	if (off == SIZE_MAX)
2727 		return SIZE_MAX;
2728 	if (ctx->flags & IORING_SETUP_CQE32) {
2729 		if (check_shl_overflow(off, 1, &off))
2730 			return SIZE_MAX;
2731 	}
2732 
2733 #ifdef CONFIG_SMP
2734 	off = ALIGN(off, SMP_CACHE_BYTES);
2735 	if (off == 0)
2736 		return SIZE_MAX;
2737 #endif
2738 
2739 	if (sq_offset)
2740 		*sq_offset = off;
2741 
2742 	sq_array_size = array_size(sizeof(u32), sq_entries);
2743 	if (sq_array_size == SIZE_MAX)
2744 		return SIZE_MAX;
2745 
2746 	if (check_add_overflow(off, sq_array_size, &off))
2747 		return SIZE_MAX;
2748 
2749 	return off;
2750 }
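
/*
 * Layout example (illustrative): for cq_entries = 16 and sq_entries = 8
 * without IORING_SETUP_CQE32, off first covers struct io_rings plus
 * 16 * sizeof(struct io_uring_cqe), is then cache-line aligned on SMP
 * builds, and *sq_offset records that point; the 8 * sizeof(u32) SQ index
 * array is appended there, so both rings share a single allocation.
 */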
2751 
2752 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
2753 			       unsigned int eventfd_async)
2754 {
2755 	struct io_ev_fd *ev_fd;
2756 	__s32 __user *fds = arg;
2757 	int fd;
2758 
2759 	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2760 					lockdep_is_held(&ctx->uring_lock));
2761 	if (ev_fd)
2762 		return -EBUSY;
2763 
2764 	if (copy_from_user(&fd, fds, sizeof(*fds)))
2765 		return -EFAULT;
2766 
2767 	ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
2768 	if (!ev_fd)
2769 		return -ENOMEM;
2770 
2771 	ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
2772 	if (IS_ERR(ev_fd->cq_ev_fd)) {
2773 		int ret = PTR_ERR(ev_fd->cq_ev_fd);
2774 		kfree(ev_fd);
2775 		return ret;
2776 	}
2777 
2778 	spin_lock(&ctx->completion_lock);
2779 	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
2780 	spin_unlock(&ctx->completion_lock);
2781 
2782 	ev_fd->eventfd_async = eventfd_async;
2783 	ctx->has_evfd = true;
2784 	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
2785 	atomic_set(&ev_fd->refs, 1);
2786 	atomic_set(&ev_fd->ops, 0);
2787 	return 0;
2788 }
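
/*
 * Userspace sketch (illustrative; names are examples): an application
 * typically wires this up through the io_uring_register(2) syscall:
 *
 *	int evfd = eventfd(0, EFD_CLOEXEC);
 *	io_uring_register(ring_fd, IORING_REGISTER_EVENTFD, &evfd, 1);
 *
 * (or IORING_REGISTER_EVENTFD_ASYNC to set eventfd_async), after which
 * posted CQEs signal the eventfd.
 */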
2789 
2790 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
2791 {
2792 	struct io_ev_fd *ev_fd;
2793 
2794 	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2795 					lockdep_is_held(&ctx->uring_lock));
2796 	if (ev_fd) {
2797 		ctx->has_evfd = false;
2798 		rcu_assign_pointer(ctx->io_ev_fd, NULL);
2799 		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
2800 			call_rcu(&ev_fd->rcu, io_eventfd_ops);
2801 		return 0;
2802 	}
2803 
2804 	return -ENXIO;
2805 }
2806 
2807 static void io_req_caches_free(struct io_ring_ctx *ctx)
2808 {
2809 	struct io_kiocb *req;
2810 	int nr = 0;
2811 
2812 	mutex_lock(&ctx->uring_lock);
2813 	io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
2814 
2815 	while (!io_req_cache_empty(ctx)) {
2816 		req = io_extract_req(ctx);
2817 		kmem_cache_free(req_cachep, req);
2818 		nr++;
2819 	}
2820 	if (nr)
2821 		percpu_ref_put_many(&ctx->refs, nr);
2822 	mutex_unlock(&ctx->uring_lock);
2823 }
2824 
2825 static void io_rsrc_node_cache_free(struct io_cache_entry *entry)
2826 {
2827 	kfree(container_of(entry, struct io_rsrc_node, cache));
2828 }
2829 
2830 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2831 {
2832 	io_sq_thread_finish(ctx);
2833 	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
2834 	if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)))
2835 		return;
2836 
2837 	mutex_lock(&ctx->uring_lock);
2838 	if (ctx->buf_data)
2839 		__io_sqe_buffers_unregister(ctx);
2840 	if (ctx->file_data)
2841 		__io_sqe_files_unregister(ctx);
2842 	io_cqring_overflow_kill(ctx);
2843 	io_eventfd_unregister(ctx);
2844 	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
2845 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
2846 	io_destroy_buffers(ctx);
2847 	mutex_unlock(&ctx->uring_lock);
2848 	if (ctx->sq_creds)
2849 		put_cred(ctx->sq_creds);
2850 	if (ctx->submitter_task)
2851 		put_task_struct(ctx->submitter_task);
2852 
2853 	/* there are no registered resources left, nobody uses it */
2854 	if (ctx->rsrc_node)
2855 		io_rsrc_node_destroy(ctx, ctx->rsrc_node);
2856 
2857 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
2858 
2859 #if defined(CONFIG_UNIX)
2860 	if (ctx->ring_sock) {
2861 		ctx->ring_sock->file = NULL; /* so that iput() is called */
2862 		sock_release(ctx->ring_sock);
2863 	}
2864 #endif
2865 	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2866 
2867 	io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free);
2868 	if (ctx->mm_account) {
2869 		mmdrop(ctx->mm_account);
2870 		ctx->mm_account = NULL;
2871 	}
2872 	io_mem_free(ctx->rings);
2873 	io_mem_free(ctx->sq_sqes);
2874 
2875 	percpu_ref_exit(&ctx->refs);
2876 	free_uid(ctx->user);
2877 	io_req_caches_free(ctx);
2878 	if (ctx->hash_map)
2879 		io_wq_put_hash(ctx->hash_map);
2880 	kfree(ctx->cancel_table.hbs);
2881 	kfree(ctx->cancel_table_locked.hbs);
2882 	kfree(ctx->dummy_ubuf);
2883 	kfree(ctx->io_bl);
2884 	xa_destroy(&ctx->io_bl_xa);
2885 	kfree(ctx);
2886 }
2887 
2888 static __cold void io_activate_pollwq_cb(struct callback_head *cb)
2889 {
2890 	struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
2891 					       poll_wq_task_work);
2892 
2893 	mutex_lock(&ctx->uring_lock);
2894 	ctx->poll_activated = true;
2895 	mutex_unlock(&ctx->uring_lock);
2896 
2897 	/*
2898 	 * Wakeups for events that arrived between the start of polling and
2899 	 * activation might have been lost due to loose synchronisation.
2900 	 */
2901 	wake_up_all(&ctx->poll_wq);
2902 	percpu_ref_put(&ctx->refs);
2903 }
2904 
2905 static __cold void io_activate_pollwq(struct io_ring_ctx *ctx)
2906 {
2907 	spin_lock(&ctx->completion_lock);
2908 	/* already activated or in progress */
2909 	if (ctx->poll_activated || ctx->poll_wq_task_work.func)
2910 		goto out;
2911 	if (WARN_ON_ONCE(!ctx->task_complete))
2912 		goto out;
2913 	if (!ctx->submitter_task)
2914 		goto out;
2915 	/*
2916 	 * With ->submitter_task set, only the submitter task completes requests;
2917 	 * we only need to sync with it, which is done by injecting a task_work
2918 	 */
2919 	init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
2920 	percpu_ref_get(&ctx->refs);
2921 	if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
2922 		percpu_ref_put(&ctx->refs);
2923 out:
2924 	spin_unlock(&ctx->completion_lock);
2925 }
2926 
2927 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
2928 {
2929 	struct io_ring_ctx *ctx = file->private_data;
2930 	__poll_t mask = 0;
2931 
2932 	if (unlikely(!ctx->poll_activated))
2933 		io_activate_pollwq(ctx);
2934 
2935 	poll_wait(file, &ctx->poll_wq, wait);
2936 	/*
2937 	 * synchronizes with barrier from wq_has_sleeper call in
2938 	 * io_commit_cqring
2939 	 */
2940 	smp_rmb();
2941 	if (!io_sqring_full(ctx))
2942 		mask |= EPOLLOUT | EPOLLWRNORM;
2943 
2944 	/*
2945 	 * Don't flush cqring overflow list here, just do a simple check.
2946 	 * Otherwise there could possibly be an ABBA deadlock:
2947 	 *      CPU0                    CPU1
2948 	 *      ----                    ----
2949 	 * lock(&ctx->uring_lock);
2950 	 *                              lock(&ep->mtx);
2951 	 *                              lock(&ctx->uring_lock);
2952 	 * lock(&ep->mtx);
2953 	 *
2954 	 * Users may get EPOLLIN while seeing nothing in the CQ ring; this
2955 	 * pushes them to do the flush.
2956 	 */
2957 
2958 	if (__io_cqring_events_user(ctx) || io_has_work(ctx))
2959 		mask |= EPOLLIN | EPOLLRDNORM;
2960 
2961 	return mask;
2962 }
2963 
2964 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
2965 {
2966 	const struct cred *creds;
2967 
2968 	creds = xa_erase(&ctx->personalities, id);
2969 	if (creds) {
2970 		put_cred(creds);
2971 		return 0;
2972 	}
2973 
2974 	return -EINVAL;
2975 }
2976 
2977 struct io_tctx_exit {
2978 	struct callback_head		task_work;
2979 	struct completion		completion;
2980 	struct io_ring_ctx		*ctx;
2981 };
2982 
2983 static __cold void io_tctx_exit_cb(struct callback_head *cb)
2984 {
2985 	struct io_uring_task *tctx = current->io_uring;
2986 	struct io_tctx_exit *work;
2987 
2988 	work = container_of(cb, struct io_tctx_exit, task_work);
2989 	/*
2990 	 * When @in_cancel, we're in cancellation and it's racy to remove the
2991 	 * node. It'll be removed by the end of cancellation, just ignore it.
2992 	 * tctx can be NULL if the queueing of this task_work raced with
2993 	 * work cancelation off the exec path.
2994 	 */
2995 	if (tctx && !atomic_read(&tctx->in_cancel))
2996 		io_uring_del_tctx_node((unsigned long)work->ctx);
2997 	complete(&work->completion);
2998 }
2999 
3000 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
3001 {
3002 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3003 
3004 	return req->ctx == data;
3005 }
3006 
3007 static __cold void io_ring_exit_work(struct work_struct *work)
3008 {
3009 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
3010 	unsigned long timeout = jiffies + HZ * 60 * 5;
3011 	unsigned long interval = HZ / 20;
3012 	struct io_tctx_exit exit;
3013 	struct io_tctx_node *node;
3014 	int ret;
3015 
3016 	/*
3017 	 * If we're doing polled IO and end up having requests being
3018 	 * submitted async (out-of-line), then completions can come in while
3019 	 * we're waiting for refs to drop. We need to reap these manually,
3020 	 * as nobody else will be looking for them.
3021 	 */
3022 	do {
3023 		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
3024 			mutex_lock(&ctx->uring_lock);
3025 			io_cqring_overflow_kill(ctx);
3026 			mutex_unlock(&ctx->uring_lock);
3027 		}
3028 
3029 		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3030 			io_move_task_work_from_local(ctx);
3031 
3032 		while (io_uring_try_cancel_requests(ctx, NULL, true))
3033 			cond_resched();
3034 
3035 		if (ctx->sq_data) {
3036 			struct io_sq_data *sqd = ctx->sq_data;
3037 			struct task_struct *tsk;
3038 
3039 			io_sq_thread_park(sqd);
3040 			tsk = sqd->thread;
3041 			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
3042 				io_wq_cancel_cb(tsk->io_uring->io_wq,
3043 						io_cancel_ctx_cb, ctx, true);
3044 			io_sq_thread_unpark(sqd);
3045 		}
3046 
3047 		io_req_caches_free(ctx);
3048 
3049 		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
3050 			/* there is little hope left, don't run it too often */
3051 			interval = HZ * 60;
3052 		}
3053 	} while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
3054 
3055 	init_completion(&exit.completion);
3056 	init_task_work(&exit.task_work, io_tctx_exit_cb);
3057 	exit.ctx = ctx;
3058 	/*
3059 	 * Some may use the context even when all refs and requests have been
3060 	 * put, and they are free to do so while still holding uring_lock or
3061 	 * completion_lock, see io_req_task_submit(). Apart from other work,
3062 	 * this lock/unlock section also waits for them to finish.
3063 	 */
3064 	mutex_lock(&ctx->uring_lock);
3065 	while (!list_empty(&ctx->tctx_list)) {
3066 		WARN_ON_ONCE(time_after(jiffies, timeout));
3067 
3068 		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
3069 					ctx_node);
3070 		/* don't spin on a single task if cancellation failed */
3071 		list_rotate_left(&ctx->tctx_list);
3072 		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
3073 		if (WARN_ON_ONCE(ret))
3074 			continue;
3075 
3076 		mutex_unlock(&ctx->uring_lock);
3077 		wait_for_completion(&exit.completion);
3078 		mutex_lock(&ctx->uring_lock);
3079 	}
3080 	mutex_unlock(&ctx->uring_lock);
3081 	spin_lock(&ctx->completion_lock);
3082 	spin_unlock(&ctx->completion_lock);
3083 
3084 	/* pairs with RCU read section in io_req_local_work_add() */
3085 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3086 		synchronize_rcu();
3087 
3088 	io_ring_ctx_free(ctx);
3089 }
3090 
3091 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3092 {
3093 	unsigned long index;
3094 	struct creds *creds;
3095 
3096 	mutex_lock(&ctx->uring_lock);
3097 	percpu_ref_kill(&ctx->refs);
3098 	xa_for_each(&ctx->personalities, index, creds)
3099 		io_unregister_personality(ctx, index);
3100 	if (ctx->rings)
3101 		io_poll_remove_all(ctx, NULL, true);
3102 	mutex_unlock(&ctx->uring_lock);
3103 
3104 	/*
3105 	 * If we failed setting up the ctx, we might not have any rings
3106 	 * and therefore never submitted any requests.
3107 	 */
3108 	if (ctx->rings)
3109 		io_kill_timeouts(ctx, NULL, true);
3110 
3111 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
3112 	/*
3113 	 * Use system_unbound_wq to avoid spawning tons of event kworkers
3114 	 * if we're exiting a ton of rings at the same time. That just adds
3115 	 * noise and overhead; there's no discernible change in runtime
3116 	 * over using system_wq.
3117 	 */
3118 	queue_work(system_unbound_wq, &ctx->exit_work);
3119 }
3120 
3121 static int io_uring_release(struct inode *inode, struct file *file)
3122 {
3123 	struct io_ring_ctx *ctx = file->private_data;
3124 
3125 	file->private_data = NULL;
3126 	io_ring_ctx_wait_and_kill(ctx);
3127 	return 0;
3128 }
3129 
3130 struct io_task_cancel {
3131 	struct task_struct *task;
3132 	bool all;
3133 };
3134 
3135 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
3136 {
3137 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3138 	struct io_task_cancel *cancel = data;
3139 
3140 	return io_match_task_safe(req, cancel->task, cancel->all);
3141 }
3142 
3143 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
3144 					 struct task_struct *task,
3145 					 bool cancel_all)
3146 {
3147 	struct io_defer_entry *de;
3148 	LIST_HEAD(list);
3149 
3150 	spin_lock(&ctx->completion_lock);
3151 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
3152 		if (io_match_task_safe(de->req, task, cancel_all)) {
3153 			list_cut_position(&list, &ctx->defer_list, &de->list);
3154 			break;
3155 		}
3156 	}
3157 	spin_unlock(&ctx->completion_lock);
3158 	if (list_empty(&list))
3159 		return false;
3160 
3161 	while (!list_empty(&list)) {
3162 		de = list_first_entry(&list, struct io_defer_entry, list);
3163 		list_del_init(&de->list);
3164 		io_req_task_queue_fail(de->req, -ECANCELED);
3165 		kfree(de);
3166 	}
3167 	return true;
3168 }
3169 
3170 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
3171 {
3172 	struct io_tctx_node *node;
3173 	enum io_wq_cancel cret;
3174 	bool ret = false;
3175 
3176 	mutex_lock(&ctx->uring_lock);
3177 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
3178 		struct io_uring_task *tctx = node->task->io_uring;
3179 
3180 		/*
3181 		 * io_wq will stay alive while we hold uring_lock, because it's
3182 		 * killed after ctx nodes, which requires taking the lock.
3183 		 */
3184 		if (!tctx || !tctx->io_wq)
3185 			continue;
3186 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
3187 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3188 	}
3189 	mutex_unlock(&ctx->uring_lock);
3190 
3191 	return ret;
3192 }
3193 
3194 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
3195 						struct task_struct *task,
3196 						bool cancel_all)
3197 {
3198 	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
3199 	struct io_uring_task *tctx = task ? task->io_uring : NULL;
3200 	enum io_wq_cancel cret;
3201 	bool ret = false;
3202 
3203 	/* set it so io_req_local_work_add() will wake us up */
3204 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
3205 		atomic_set(&ctx->cq_wait_nr, 1);
3206 		smp_mb();
3207 	}
3208 
3209 	/* failed during ring init, it couldn't have issued any requests */
3210 	if (!ctx->rings)
3211 		return false;
3212 
3213 	if (!task) {
3214 		ret |= io_uring_try_cancel_iowq(ctx);
3215 	} else if (tctx && tctx->io_wq) {
3216 		/*
3217 		 * Cancels requests of all rings, not only @ctx, but
3218 		 * it's fine as the task is in exit/exec.
3219 		 */
3220 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
3221 				       &cancel, true);
3222 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3223 	}
3224 
3225 	/* SQPOLL thread does its own polling */
3226 	if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
3227 	    (ctx->sq_data && ctx->sq_data->thread == current)) {
3228 		while (!wq_list_empty(&ctx->iopoll_list)) {
3229 			io_iopoll_try_reap_events(ctx);
3230 			ret = true;
3231 			cond_resched();
3232 		}
3233 	}
3234 
3235 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3236 	    io_allowed_defer_tw_run(ctx))
3237 		ret |= io_run_local_work(ctx) > 0;
3238 	ret |= io_cancel_defer_files(ctx, task, cancel_all);
3239 	mutex_lock(&ctx->uring_lock);
3240 	ret |= io_poll_remove_all(ctx, task, cancel_all);
3241 	mutex_unlock(&ctx->uring_lock);
3242 	ret |= io_kill_timeouts(ctx, task, cancel_all);
3243 	if (task)
3244 		ret |= io_run_task_work() > 0;
3245 	return ret;
3246 }
3247 
3248 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
3249 {
3250 	if (tracked)
3251 		return atomic_read(&tctx->inflight_tracked);
3252 	return percpu_counter_sum(&tctx->inflight);
3253 }
3254 
3255 /*
3256  * Find any io_uring ctx that this task has registered or done IO on, and cancel
3257  * requests. @sqd should be non-NULL iff it's an SQPOLL thread cancellation.
3258  */
3259 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
3260 {
3261 	struct io_uring_task *tctx = current->io_uring;
3262 	struct io_ring_ctx *ctx;
3263 	struct io_tctx_node *node;
3264 	unsigned long index;
3265 	s64 inflight;
3266 	DEFINE_WAIT(wait);
3267 
3268 	WARN_ON_ONCE(sqd && sqd->thread != current);
3269 
3270 	if (!current->io_uring)
3271 		return;
3272 	if (tctx->io_wq)
3273 		io_wq_exit_start(tctx->io_wq);
3274 
3275 	atomic_inc(&tctx->in_cancel);
3276 	do {
3277 		bool loop = false;
3278 
3279 		io_uring_drop_tctx_refs(current);
3280 		/* read completions before cancellations */
3281 		inflight = tctx_inflight(tctx, !cancel_all);
3282 		if (!inflight)
3283 			break;
3284 
3285 		if (!sqd) {
3286 			xa_for_each(&tctx->xa, index, node) {
3287 				/* sqpoll task will cancel all its requests */
3288 				if (node->ctx->sq_data)
3289 					continue;
3290 				loop |= io_uring_try_cancel_requests(node->ctx,
3291 							current, cancel_all);
3292 			}
3293 		} else {
3294 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3295 				loop |= io_uring_try_cancel_requests(ctx,
3296 								     current,
3297 								     cancel_all);
3298 		}
3299 
3300 		if (loop) {
3301 			cond_resched();
3302 			continue;
3303 		}
3304 
3305 		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
3306 		io_run_task_work();
3307 		io_uring_drop_tctx_refs(current);
3308 		xa_for_each(&tctx->xa, index, node) {
3309 			if (!llist_empty(&node->ctx->work_llist)) {
3310 				WARN_ON_ONCE(node->ctx->submitter_task &&
3311 					     node->ctx->submitter_task != current);
3312 				goto end_wait;
3313 			}
3314 		}
3315 		/*
3316 		 * If we've seen completions, retry without waiting. This
3317 		 * avoids a race where a completion comes in before we did
3318 		 * prepare_to_wait().
3319 		 */
3320 		if (inflight == tctx_inflight(tctx, !cancel_all))
3321 			schedule();
3322 end_wait:
3323 		finish_wait(&tctx->wait, &wait);
3324 	} while (1);
3325 
3326 	io_uring_clean_tctx(tctx);
3327 	if (cancel_all) {
3328 		/*
3329 		 * We shouldn't run task_works after cancel, so just leave
3330 		 * ->in_cancel set for normal exit.
3331 		 */
3332 		atomic_dec(&tctx->in_cancel);
3333 		/* for exec all current's requests should be gone, kill tctx */
3334 		__io_uring_free(current);
3335 	}
3336 }
3337 
3338 void __io_uring_cancel(bool cancel_all)
3339 {
3340 	io_uring_cancel_generic(cancel_all, NULL);
3341 }
3342 
3343 static void *io_uring_validate_mmap_request(struct file *file,
3344 					    loff_t pgoff, size_t sz)
3345 {
3346 	struct io_ring_ctx *ctx = file->private_data;
3347 	loff_t offset = pgoff << PAGE_SHIFT;
3348 	struct page *page;
3349 	void *ptr;
3350 
3351 	switch (offset & IORING_OFF_MMAP_MASK) {
3352 	case IORING_OFF_SQ_RING:
3353 	case IORING_OFF_CQ_RING:
3354 		ptr = ctx->rings;
3355 		break;
3356 	case IORING_OFF_SQES:
3357 		ptr = ctx->sq_sqes;
3358 		break;
3359 	case IORING_OFF_PBUF_RING: {
3360 		unsigned int bgid;
3361 
3362 		bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
3363 		mutex_lock(&ctx->uring_lock);
3364 		ptr = io_pbuf_get_address(ctx, bgid);
3365 		mutex_unlock(&ctx->uring_lock);
3366 		if (!ptr)
3367 			return ERR_PTR(-EINVAL);
3368 		break;
3369 		}
3370 	default:
3371 		return ERR_PTR(-EINVAL);
3372 	}
3373 
3374 	page = virt_to_head_page(ptr);
3375 	if (sz > page_size(page))
3376 		return ERR_PTR(-EINVAL);
3377 
3378 	return ptr;
3379 }
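
/*
 * Userspace sketch (illustrative; sizes and names are examples): the
 * offsets validated above are what an application passes to mmap(), e.g.
 *
 *	sq_ptr = mmap(0, sq_ring_sz, PROT_READ | PROT_WRITE,
 *		      MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	sqes = mmap(0, sqes_sz, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
 *
 * with the sizes derived from the io_uring_params filled in at setup time.
 */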
3380 
3381 #ifdef CONFIG_MMU
3382 
3383 static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3384 {
3385 	size_t sz = vma->vm_end - vma->vm_start;
3386 	unsigned long pfn;
3387 	void *ptr;
3388 
3389 	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
3390 	if (IS_ERR(ptr))
3391 		return PTR_ERR(ptr);
3392 
3393 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3394 	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3395 }
3396 
3397 static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp,
3398 			unsigned long addr, unsigned long len,
3399 			unsigned long pgoff, unsigned long flags)
3400 {
3401 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
3402 	struct vm_unmapped_area_info info;
3403 	void *ptr;
3404 
3405 	/*
3406 	 * Do not allow mapping to a user-provided address, to avoid breaking
3407 	 * the aliasing rules. Userspace is not able to guess the offset
3408 	 * address of the kernel's kmalloc()ed memory area.
3409 	 */
3410 	if (addr)
3411 		return -EINVAL;
3412 
3413 	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
3414 	if (IS_ERR(ptr))
3415 		return -ENOMEM;
3416 
3417 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
3418 	info.length = len;
3419 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
3420 	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
3421 #ifdef SHM_COLOUR
3422 	info.align_mask = PAGE_MASK & (SHM_COLOUR - 1UL);
3423 #else
3424 	info.align_mask = PAGE_MASK & (SHMLBA - 1UL);
3425 #endif
3426 	info.align_offset = (unsigned long) ptr;
3427 
3428 	/*
3429 	 * A failed mmap() very likely causes application failure,
3430 	 * so fall back to the bottom-up function here. This scenario
3431 	 * can happen with large stack limits and large mmap()
3432 	 * allocations.
3433 	 */
3434 	addr = vm_unmapped_area(&info);
3435 	if (offset_in_page(addr)) {
3436 		info.flags = 0;
3437 		info.low_limit = TASK_UNMAPPED_BASE;
3438 		info.high_limit = mmap_end;
3439 		addr = vm_unmapped_area(&info);
3440 	}
3441 
3442 	return addr;
3443 }
3444 
3445 #else /* !CONFIG_MMU */
3446 
3447 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3448 {
3449 	return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL;
3450 }
3451 
3452 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
3453 {
3454 	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
3455 }
3456 
3457 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
3458 	unsigned long addr, unsigned long len,
3459 	unsigned long pgoff, unsigned long flags)
3460 {
3461 	void *ptr;
3462 
3463 	ptr = io_uring_validate_mmap_request(file, pgoff, len);
3464 	if (IS_ERR(ptr))
3465 		return PTR_ERR(ptr);
3466 
3467 	return (unsigned long) ptr;
3468 }
3469 
3470 #endif /* !CONFIG_MMU */
3471 
3472 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
3473 {
3474 	if (flags & IORING_ENTER_EXT_ARG) {
3475 		struct io_uring_getevents_arg arg;
3476 
3477 		if (argsz != sizeof(arg))
3478 			return -EINVAL;
3479 		if (copy_from_user(&arg, argp, sizeof(arg)))
3480 			return -EFAULT;
3481 	}
3482 	return 0;
3483 }
3484 
3485 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
3486 			  struct __kernel_timespec __user **ts,
3487 			  const sigset_t __user **sig)
3488 {
3489 	struct io_uring_getevents_arg arg;
3490 
3491 	/*
3492 	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
3493 	 * is just a pointer to the sigset_t.
3494 	 */
3495 	if (!(flags & IORING_ENTER_EXT_ARG)) {
3496 		*sig = (const sigset_t __user *) argp;
3497 		*ts = NULL;
3498 		return 0;
3499 	}
3500 
3501 	/*
3502 	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
3503 	 * timespec and sigset_t pointers if good.
3504 	 */
3505 	if (*argsz != sizeof(arg))
3506 		return -EINVAL;
3507 	if (copy_from_user(&arg, argp, sizeof(arg)))
3508 		return -EFAULT;
3509 	if (arg.pad)
3510 		return -EINVAL;
3511 	*sig = u64_to_user_ptr(arg.sigmask);
3512 	*argsz = arg.sigmask_sz;
3513 	*ts = u64_to_user_ptr(arg.ts);
3514 	return 0;
3515 }
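
/*
 * Userspace sketch (illustrative; variable names are examples): with
 * IORING_ENTER_EXT_ARG, argp carries the struct below instead of a bare
 * sigset_t pointer:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (__u64)(unsigned long)&mask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(unsigned long)&ts,
 *	};
 *	io_uring_enter(ring_fd, 0, 1, IORING_ENTER_GETEVENTS |
 *		       IORING_ENTER_EXT_ARG, &arg, sizeof(arg));
 *
 * where argsz must equal sizeof(arg) or -EINVAL is returned.
 */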
3516 
3517 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3518 		u32, min_complete, u32, flags, const void __user *, argp,
3519 		size_t, argsz)
3520 {
3521 	struct io_ring_ctx *ctx;
3522 	struct fd f;
3523 	long ret;
3524 
3525 	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
3526 			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
3527 			       IORING_ENTER_REGISTERED_RING)))
3528 		return -EINVAL;
3529 
3530 	/*
3531 	 * The ring fd has been registered via IORING_REGISTER_RING_FDS, so we
3532 	 * need only dereference our task-private array to find it.
3533 	 */
3534 	if (flags & IORING_ENTER_REGISTERED_RING) {
3535 		struct io_uring_task *tctx = current->io_uring;
3536 
3537 		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
3538 			return -EINVAL;
3539 		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
3540 		f.file = tctx->registered_rings[fd];
3541 		f.flags = 0;
3542 		if (unlikely(!f.file))
3543 			return -EBADF;
3544 	} else {
3545 		f = fdget(fd);
3546 		if (unlikely(!f.file))
3547 			return -EBADF;
3548 		ret = -EOPNOTSUPP;
3549 		if (unlikely(!io_is_uring_fops(f.file)))
3550 			goto out;
3551 	}
3552 
3553 	ctx = f.file->private_data;
3554 	ret = -EBADFD;
3555 	if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
3556 		goto out;
3557 
3558 	/*
3559 	 * For SQ polling, the thread will do all submissions and completions.
3560 	 * Just return the requested submit count, and wake the thread if
3561 	 * we were asked to.
3562 	 */
3563 	ret = 0;
3564 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3565 		io_cqring_overflow_flush(ctx);
3566 
3567 		if (unlikely(ctx->sq_data->thread == NULL)) {
3568 			ret = -EOWNERDEAD;
3569 			goto out;
3570 		}
3571 		if (flags & IORING_ENTER_SQ_WAKEUP)
3572 			wake_up(&ctx->sq_data->wait);
3573 		if (flags & IORING_ENTER_SQ_WAIT)
3574 			io_sqpoll_wait_sq(ctx);
3575 
3576 		ret = to_submit;
3577 	} else if (to_submit) {
3578 		ret = io_uring_add_tctx_node(ctx);
3579 		if (unlikely(ret))
3580 			goto out;
3581 
3582 		mutex_lock(&ctx->uring_lock);
3583 		ret = io_submit_sqes(ctx, to_submit);
3584 		if (ret != to_submit) {
3585 			mutex_unlock(&ctx->uring_lock);
3586 			goto out;
3587 		}
3588 		if (flags & IORING_ENTER_GETEVENTS) {
3589 			if (ctx->syscall_iopoll)
3590 				goto iopoll_locked;
3591 			/*
3592 			 * Ignore errors, we'll soon call io_cqring_wait() and
3593 			 * it should handle ownership problems if any.
3594 			 */
3595 			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3596 				(void)io_run_local_work_locked(ctx);
3597 		}
3598 		mutex_unlock(&ctx->uring_lock);
3599 	}
3600 
3601 	if (flags & IORING_ENTER_GETEVENTS) {
3602 		int ret2;
3603 
3604 		if (ctx->syscall_iopoll) {
3605 			/*
3606 			 * We disallow the app entering submit/complete with
3607 			 * polling, but we still need to lock the ring to
3608 			 * prevent racing with polled issue that got punted to
3609 			 * a workqueue.
3610 			 */
3611 			mutex_lock(&ctx->uring_lock);
3612 iopoll_locked:
3613 			ret2 = io_validate_ext_arg(flags, argp, argsz);
3614 			if (likely(!ret2)) {
3615 				min_complete = min(min_complete,
3616 						   ctx->cq_entries);
3617 				ret2 = io_iopoll_check(ctx, min_complete);
3618 			}
3619 			mutex_unlock(&ctx->uring_lock);
3620 		} else {
3621 			const sigset_t __user *sig;
3622 			struct __kernel_timespec __user *ts;
3623 
3624 			ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
3625 			if (likely(!ret2)) {
3626 				min_complete = min(min_complete,
3627 						   ctx->cq_entries);
3628 				ret2 = io_cqring_wait(ctx, min_complete, sig,
3629 						      argsz, ts);
3630 			}
3631 		}
3632 
3633 		if (!ret) {
3634 			ret = ret2;
3635 
3636 			/*
3637 			 * EBADR indicates that one or more CQEs were dropped.
3638 			 * Once the user has been informed we can clear the bit
3639 			 * as they are obviously ok with those drops.
3640 			 */
3641 			if (unlikely(ret2 == -EBADR))
3642 				clear_bit(IO_CHECK_CQ_DROPPED_BIT,
3643 					  &ctx->check_cq);
3644 		}
3645 	}
3646 out:
3647 	fdput(f);
3648 	return ret;
3649 }
3650 
3651 static const struct file_operations io_uring_fops = {
3652 	.release	= io_uring_release,
3653 	.mmap		= io_uring_mmap,
3654 #ifndef CONFIG_MMU
3655 	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
3656 	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
3657 #else
3658 	.get_unmapped_area = io_uring_mmu_get_unmapped_area,
3659 #endif
3660 	.poll		= io_uring_poll,
3661 #ifdef CONFIG_PROC_FS
3662 	.show_fdinfo	= io_uring_show_fdinfo,
3663 #endif
3664 };
3665 
3666 bool io_is_uring_fops(struct file *file)
3667 {
3668 	return file->f_op == &io_uring_fops;
3669 }
3670 
3671 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3672 					 struct io_uring_params *p)
3673 {
3674 	struct io_rings *rings;
3675 	size_t size, sq_array_offset;
3676 
3677 	/* make sure these are sane, as we already accounted them */
3678 	ctx->sq_entries = p->sq_entries;
3679 	ctx->cq_entries = p->cq_entries;
3680 
3681 	size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
3682 	if (size == SIZE_MAX)
3683 		return -EOVERFLOW;
3684 
3685 	rings = io_mem_alloc(size);
3686 	if (!rings)
3687 		return -ENOMEM;
3688 
3689 	ctx->rings = rings;
3690 	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3691 	rings->sq_ring_mask = p->sq_entries - 1;
3692 	rings->cq_ring_mask = p->cq_entries - 1;
3693 	rings->sq_ring_entries = p->sq_entries;
3694 	rings->cq_ring_entries = p->cq_entries;
3695 
3696 	if (p->flags & IORING_SETUP_SQE128)
3697 		size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
3698 	else
3699 		size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3700 	if (size == SIZE_MAX) {
3701 		io_mem_free(ctx->rings);
3702 		ctx->rings = NULL;
3703 		return -EOVERFLOW;
3704 	}
3705 
3706 	ctx->sq_sqes = io_mem_alloc(size);
3707 	if (!ctx->sq_sqes) {
3708 		io_mem_free(ctx->rings);
3709 		ctx->rings = NULL;
3710 		return -ENOMEM;
3711 	}
3712 
3713 	return 0;
3714 }
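
/*
 * Worked example for the sizing above (illustrative): with
 * IORING_SETUP_SQE128 set and p->sq_entries == 64, the SQE array is
 * array_size(2 * sizeof(struct io_uring_sqe), 64) == 2 * 64 * 64 ==
 * 8192 bytes, since the build-time check in io_uring_init() guarantees
 * sizeof(struct io_uring_sqe) == 64.
 */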
3715 
3716 static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
3717 {
3718 	int ret, fd;
3719 
3720 	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3721 	if (fd < 0)
3722 		return fd;
3723 
3724 	ret = __io_uring_add_tctx_node(ctx);
3725 	if (ret) {
3726 		put_unused_fd(fd);
3727 		return ret;
3728 	}
3729 	fd_install(fd, file);
3730 	return fd;
3731 }
3732 
3733 /*
3734  * Allocate an anonymous fd; this is what constitutes the application-
3735  * visible backing of an io_uring instance. The application mmaps this
3736  * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
3737  * we have to tie this fd to a socket for file garbage collection purposes.
3738  */
3739 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
3740 {
3741 	struct file *file;
3742 #if defined(CONFIG_UNIX)
3743 	int ret;
3744 
3745 	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
3746 				&ctx->ring_sock);
3747 	if (ret)
3748 		return ERR_PTR(ret);
3749 #endif
3750 
3751 	file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
3752 					 O_RDWR | O_CLOEXEC, NULL);
3753 #if defined(CONFIG_UNIX)
3754 	if (IS_ERR(file)) {
3755 		sock_release(ctx->ring_sock);
3756 		ctx->ring_sock = NULL;
3757 	} else {
3758 		ctx->ring_sock->file = file;
3759 	}
3760 #endif
3761 	return file;
3762 }
3763 
3764 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
3765 				  struct io_uring_params __user *params)
3766 {
3767 	struct io_ring_ctx *ctx;
3768 	struct file *file;
3769 	int ret;
3770 
3771 	if (!entries)
3772 		return -EINVAL;
3773 	if (entries > IORING_MAX_ENTRIES) {
3774 		if (!(p->flags & IORING_SETUP_CLAMP))
3775 			return -EINVAL;
3776 		entries = IORING_MAX_ENTRIES;
3777 	}
3778 
3779 	/*
3780 	 * Use twice as many entries for the CQ ring. It's possible for the
3781 	 * application to drive a higher depth than the size of the SQ ring,
3782 	 * since the sqes are only used at submission time. This allows for
3783  * some flexibility in overcommitting. If the application has
3784 	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
3785 	 * of CQ ring entries manually.
3786 	 */
3787 	p->sq_entries = roundup_pow_of_two(entries);
3788 	if (p->flags & IORING_SETUP_CQSIZE) {
3789 		/*
3790 		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
3791 		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
3792 		 * to a power-of-two, if it isn't already. Beyond requiring
3793 		 * cq_entries >= sq_entries, we impose no other cq vs sq sizing.
3794 		if (!p->cq_entries)
3795 			return -EINVAL;
3796 		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
3797 			if (!(p->flags & IORING_SETUP_CLAMP))
3798 				return -EINVAL;
3799 			p->cq_entries = IORING_MAX_CQ_ENTRIES;
3800 		}
3801 		p->cq_entries = roundup_pow_of_two(p->cq_entries);
3802 		if (p->cq_entries < p->sq_entries)
3803 			return -EINVAL;
3804 	} else {
3805 		p->cq_entries = 2 * p->sq_entries;
3806 	}
3807 
3808 	ctx = io_ring_ctx_alloc(p);
3809 	if (!ctx)
3810 		return -ENOMEM;
3811 
3812 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3813 	    !(ctx->flags & IORING_SETUP_IOPOLL) &&
3814 	    !(ctx->flags & IORING_SETUP_SQPOLL))
3815 		ctx->task_complete = true;
3816 
3817 	/*
3818 	 * Lazy poll_wq activation relies on ->task_complete for synchronisation
3819 	 * purposes; see io_activate_pollwq().
3820 	 */
3821 	if (!ctx->task_complete)
3822 		ctx->poll_activated = true;
3823 
3824 	/*
3825 	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, userspace
3826 	 * applications don't need to poll for io completion events
3827 	 * themselves; they can rely on io_sq_thread to do that polling,
3828 	 * which reduces cpu usage and uring_lock contention.
3829 	 */
3830 	if (ctx->flags & IORING_SETUP_IOPOLL &&
3831 	    !(ctx->flags & IORING_SETUP_SQPOLL))
3832 		ctx->syscall_iopoll = 1;
3833 
3834 	ctx->compat = in_compat_syscall();
3835 	if (!capable(CAP_IPC_LOCK))
3836 		ctx->user = get_uid(current_user());
3837 
3838 	/*
3839 	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
3840 	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
3841 	 */
3842 	ret = -EINVAL;
3843 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3844 		/* IPI related flags don't make sense with SQPOLL */
3845 		if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
3846 				  IORING_SETUP_TASKRUN_FLAG |
3847 				  IORING_SETUP_DEFER_TASKRUN))
3848 			goto err;
3849 		ctx->notify_method = TWA_SIGNAL_NO_IPI;
3850 	} else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
3851 		ctx->notify_method = TWA_SIGNAL_NO_IPI;
3852 	} else {
3853 		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
3854 		    !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
3855 			goto err;
3856 		ctx->notify_method = TWA_SIGNAL;
3857 	}
3858 
3859 	/*
3860 	 * For DEFER_TASKRUN we require the completion task to be the same as the
3861 	 * submission task. This implies that there is only one submitter, so enforce
3862 	 * that.
3863 	 */
3864 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
3865 	    !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
3866 		goto err;
3867 	}
3868 
3869 	/*
3870 	 * This is just grabbed for accounting purposes. When a process exits,
3871 	 * the mm is exited and dropped before the files, hence we need to hang
3872 	 * on to this mm purely for the purposes of being able to unaccount
3873 	 * memory (locked/pinned vm). It's not used for anything else.
3874 	 */
3875 	mmgrab(current->mm);
3876 	ctx->mm_account = current->mm;
3877 
3878 	ret = io_allocate_scq_urings(ctx, p);
3879 	if (ret)
3880 		goto err;
3881 
3882 	ret = io_sq_offload_create(ctx, p);
3883 	if (ret)
3884 		goto err;
3885 
3886 	ret = io_rsrc_init(ctx);
3887 	if (ret)
3888 		goto err;
3889 
3890 	memset(&p->sq_off, 0, sizeof(p->sq_off));
3891 	p->sq_off.head = offsetof(struct io_rings, sq.head);
3892 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
3893 	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
3894 	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
3895 	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
3896 	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
3897 	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
3898 
3899 	memset(&p->cq_off, 0, sizeof(p->cq_off));
3900 	p->cq_off.head = offsetof(struct io_rings, cq.head);
3901 	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
3902 	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
3903 	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
3904 	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
3905 	p->cq_off.cqes = offsetof(struct io_rings, cqes);
3906 	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
3907 
3908 	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
3909 			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
3910 			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
3911 			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
3912 			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
3913 			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
3914 			IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING;
3915 
3916 	if (copy_to_user(params, p, sizeof(*p))) {
3917 		ret = -EFAULT;
3918 		goto err;
3919 	}
3920 
3921 	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
3922 	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
3923 		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
3924 
3925 	file = io_uring_get_file(ctx);
3926 	if (IS_ERR(file)) {
3927 		ret = PTR_ERR(file);
3928 		goto err;
3929 	}
3930 
3931 	/*
3932 	 * Install ring fd as the very last thing, so we don't risk someone
3933 	 * having closed it before we finish setup.
3934 	 */
3935 	ret = io_uring_install_fd(ctx, file);
3936 	if (ret < 0) {
3937 		/* fput will clean it up */
3938 		fput(file);
3939 		return ret;
3940 	}
3941 
3942 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
3943 	return ret;
3944 err:
3945 	io_ring_ctx_wait_and_kill(ctx);
3946 	return ret;
3947 }
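
/*
 * Userspace sketch (illustrative, not part of this file): mapping the
 * rings via the offsets published in struct io_uring_params above,
 * assuming ring_fd and p came from io_uring_setup() and
 * IORING_FEAT_SINGLE_MMAP is advertised (so one mapping covers both the
 * SQ and CQ ring):
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes +
 *		       p.cq_entries * sizeof(struct io_uring_cqe);
 *	void *rings = mmap(NULL, sq_sz > cq_sz ? sq_sz : cq_sz,
 *			   PROT_READ | PROT_WRITE,
 *			   MAP_SHARED | MAP_POPULATE, ring_fd,
 *			   IORING_OFF_SQ_RING);
 *	struct io_uring_sqe *sqes = mmap(NULL,
 *			   p.sq_entries * sizeof(struct io_uring_sqe),
 *			   PROT_READ | PROT_WRITE,
 *			   MAP_SHARED | MAP_POPULATE, ring_fd,
 *			   IORING_OFF_SQES);
 */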
3948 
3949 /*
3950  * Sets up an io_uring context, and returns the fd. The application asks for
3951  * a ring size; we return the actual sq/cq ring sizes (among other things) in the
3952  * params structure passed in.
3953  */
3954 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
3955 {
3956 	struct io_uring_params p;
3957 	int i;
3958 
3959 	if (copy_from_user(&p, params, sizeof(p)))
3960 		return -EFAULT;
3961 	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
3962 		if (p.resv[i])
3963 			return -EINVAL;
3964 	}
3965 
3966 	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
3967 			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
3968 			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
3969 			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
3970 			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
3971 			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
3972 			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN))
3973 		return -EINVAL;
3974 
3975 	return io_uring_create(entries, &p, params);
3976 }
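
/*
 * Userspace sketch (illustrative): asking for 128 SQ entries and a larger
 * CQ ring via IORING_SETUP_CQSIZE. The kernel writes the rounded-up ring
 * sizes and the ring offsets back into p on success.
 *
 *	struct io_uring_params p = {
 *		.flags = IORING_SETUP_CQSIZE,
 *		.cq_entries = 512,
 *	};
 *	int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 */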
3977 
3978 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
3979 		struct io_uring_params __user *, params)
3980 {
3981 	return io_uring_setup(entries, params);
3982 }
3983 
3984 static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
3985 			   unsigned nr_args)
3986 {
3987 	struct io_uring_probe *p;
3988 	size_t size;
3989 	int i, ret;
3990 
3991 	size = struct_size(p, ops, nr_args);
3992 	if (size == SIZE_MAX)
3993 		return -EOVERFLOW;
3994 	p = kzalloc(size, GFP_KERNEL);
3995 	if (!p)
3996 		return -ENOMEM;
3997 
3998 	ret = -EFAULT;
3999 	if (copy_from_user(p, arg, size))
4000 		goto out;
4001 	ret = -EINVAL;
4002 	if (memchr_inv(p, 0, size))
4003 		goto out;
4004 
4005 	p->last_op = IORING_OP_LAST - 1;
4006 	if (nr_args > IORING_OP_LAST)
4007 		nr_args = IORING_OP_LAST;
4008 
4009 	for (i = 0; i < nr_args; i++) {
4010 		p->ops[i].op = i;
4011 		if (!io_issue_defs[i].not_supported)
4012 			p->ops[i].flags = IO_URING_OP_SUPPORTED;
4013 	}
4014 	p->ops_len = i;
4015 
4016 	ret = 0;
4017 	if (copy_to_user(arg, p, size))
4018 		ret = -EFAULT;
4019 out:
4020 	kfree(p);
4021 	return ret;
4022 }
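
/*
 * Userspace sketch (illustrative): probing which opcodes this kernel
 * supports, assuming ring_fd from io_uring_setup(). The probe struct
 * must be zeroed beforehand, per the memchr_inv() check above.
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  256 * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, 256);
 *	if (probe->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED)
 *		...
 */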
4023 
4024 static int io_register_personality(struct io_ring_ctx *ctx)
4025 {
4026 	const struct cred *creds;
4027 	u32 id;
4028 	int ret;
4029 
4030 	creds = get_current_cred();
4031 
4032 	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
4033 			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
4034 	if (ret < 0) {
4035 		put_cred(creds);
4036 		return ret;
4037 	}
4038 	return id;
4039 }
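
/*
 * Userspace sketch (illustrative): the returned id can later be placed
 * in sqe->personality so that request is issued with the credentials
 * registered here. Note that arg and nr_args must be NULL/0.
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	...
 *	sqe->personality = id;
 */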
4040 
4041 static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
4042 					   void __user *arg, unsigned int nr_args)
4043 {
4044 	struct io_uring_restriction *res;
4045 	size_t size;
4046 	int i, ret;
4047 
4048 	/* Restrictions are allowed only if the ring starts disabled */
4049 	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
4050 		return -EBADFD;
4051 
4052 	/* We allow only a single restrictions registration */
4053 	if (ctx->restrictions.registered)
4054 		return -EBUSY;
4055 
4056 	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
4057 		return -EINVAL;
4058 
4059 	size = array_size(nr_args, sizeof(*res));
4060 	if (size == SIZE_MAX)
4061 		return -EOVERFLOW;
4062 
4063 	res = memdup_user(arg, size);
4064 	if (IS_ERR(res))
4065 		return PTR_ERR(res);
4066 
4067 	ret = 0;
4068 
4069 	for (i = 0; i < nr_args; i++) {
4070 		switch (res[i].opcode) {
4071 		case IORING_RESTRICTION_REGISTER_OP:
4072 			if (res[i].register_op >= IORING_REGISTER_LAST) {
4073 				ret = -EINVAL;
4074 				goto out;
4075 			}
4076 
4077 			__set_bit(res[i].register_op,
4078 				  ctx->restrictions.register_op);
4079 			break;
4080 		case IORING_RESTRICTION_SQE_OP:
4081 			if (res[i].sqe_op >= IORING_OP_LAST) {
4082 				ret = -EINVAL;
4083 				goto out;
4084 			}
4085 
4086 			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
4087 			break;
4088 		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
4089 			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
4090 			break;
4091 		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
4092 			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
4093 			break;
4094 		default:
4095 			ret = -EINVAL;
4096 			goto out;
4097 		}
4098 	}
4099 
4100 out:
4101 	/* Reset all restrictions if an error happened */
4102 	if (ret != 0)
4103 		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
4104 	else
4105 		ctx->restrictions.registered = true;
4106 
4107 	kfree(res);
4108 	return ret;
4109 }
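
/*
 * Userspace sketch (illustrative): restricting a ring created with
 * IORING_SETUP_R_DISABLED to read/write SQEs only, while still allowing
 * it to be enabled afterwards:
 *
 *	struct io_uring_restriction res[] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READ },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_WRITE },
 *		{ .opcode = IORING_RESTRICTION_REGISTER_OP,
 *		  .register_op = IORING_REGISTER_ENABLE_RINGS },
 *	};
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 3);
 */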
4110 
4111 static int io_register_enable_rings(struct io_ring_ctx *ctx)
4112 {
4113 	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
4114 		return -EBADFD;
4115 
4116 	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
4117 		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
4118 		/*
4119 		 * Lazy activation attempts would fail if the ring was polled
4120 		 * before submitter_task is set.
4121 		 */
4122 		if (wq_has_sleeper(&ctx->poll_wq))
4123 			io_activate_pollwq(ctx);
4124 	}
4125 
4126 	if (ctx->restrictions.registered)
4127 		ctx->restricted = 1;
4128 
4129 	ctx->flags &= ~IORING_SETUP_R_DISABLED;
4130 	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
4131 		wake_up(&ctx->sq_data->wait);
4132 	return 0;
4133 }
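
/*
 * Userspace sketch (illustrative): enabling a ring that was created with
 * IORING_SETUP_R_DISABLED, typically after registering restrictions:
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */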
4134 
4135 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
4136 				       void __user *arg, unsigned len)
4137 {
4138 	struct io_uring_task *tctx = current->io_uring;
4139 	cpumask_var_t new_mask;
4140 	int ret;
4141 
4142 	if (!tctx || !tctx->io_wq)
4143 		return -EINVAL;
4144 
4145 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4146 		return -ENOMEM;
4147 
4148 	cpumask_clear(new_mask);
4149 	if (len > cpumask_size())
4150 		len = cpumask_size();
4151 
4152 	if (in_compat_syscall()) {
4153 		ret = compat_get_bitmap(cpumask_bits(new_mask),
4154 					(const compat_ulong_t __user *)arg,
4155 					len * 8 /* CHAR_BIT */);
4156 	} else {
4157 		ret = copy_from_user(new_mask, arg, len);
4158 	}
4159 
4160 	if (ret) {
4161 		free_cpumask_var(new_mask);
4162 		return -EFAULT;
4163 	}
4164 
4165 	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
4166 	free_cpumask_var(new_mask);
4167 	return ret;
4168 }
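
/*
 * Userspace sketch (illustrative): pinning the calling task's io-wq
 * workers to CPU 0, using a cpu_set_t as the bitmap argument:
 *
 *	cpu_set_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 */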
4169 
4170 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
4171 {
4172 	struct io_uring_task *tctx = current->io_uring;
4173 
4174 	if (!tctx || !tctx->io_wq)
4175 		return -EINVAL;
4176 
4177 	return io_wq_cpu_affinity(tctx->io_wq, NULL);
4178 }
4179 
4180 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
4181 					       void __user *arg)
4182 	__must_hold(&ctx->uring_lock)
4183 {
4184 	struct io_tctx_node *node;
4185 	struct io_uring_task *tctx = NULL;
4186 	struct io_sq_data *sqd = NULL;
4187 	__u32 new_count[2];
4188 	int i, ret;
4189 
4190 	if (copy_from_user(new_count, arg, sizeof(new_count)))
4191 		return -EFAULT;
4192 	for (i = 0; i < ARRAY_SIZE(new_count); i++)
4193 		if (new_count[i] > INT_MAX)
4194 			return -EINVAL;
4195 
4196 	if (ctx->flags & IORING_SETUP_SQPOLL) {
4197 		sqd = ctx->sq_data;
4198 		if (sqd) {
4199 			/*
4200 			 * Observe the correct sqd->lock -> ctx->uring_lock
4201 			 * ordering. Fine to drop uring_lock here; we hold
4202 			 * a ref to the ctx.
4203 			 */
4204 			refcount_inc(&sqd->refs);
4205 			mutex_unlock(&ctx->uring_lock);
4206 			mutex_lock(&sqd->lock);
4207 			mutex_lock(&ctx->uring_lock);
4208 			if (sqd->thread)
4209 				tctx = sqd->thread->io_uring;
4210 		}
4211 	} else {
4212 		tctx = current->io_uring;
4213 	}
4214 
4215 	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
4216 
4217 	for (i = 0; i < ARRAY_SIZE(new_count); i++)
4218 		if (new_count[i])
4219 			ctx->iowq_limits[i] = new_count[i];
4220 	ctx->iowq_limits_set = true;
4221 
4222 	if (tctx && tctx->io_wq) {
4223 		ret = io_wq_max_workers(tctx->io_wq, new_count);
4224 		if (ret)
4225 			goto err;
4226 	} else {
4227 		memset(new_count, 0, sizeof(new_count));
4228 	}
4229 
4230 	if (sqd) {
4231 		mutex_unlock(&sqd->lock);
4232 		io_put_sq_data(sqd);
4233 	}
4234 
4235 	if (copy_to_user(arg, new_count, sizeof(new_count)))
4236 		return -EFAULT;
4237 
4238 	/* that's it for SQPOLL, only the SQPOLL task creates requests */
4239 	if (sqd)
4240 		return 0;
4241 
4242 	/* now propagate the restriction to all registered users */
4243 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
4244 		struct io_uring_task *tctx = node->task->io_uring;
4245 
4246 		if (WARN_ON_ONCE(!tctx->io_wq))
4247 			continue;
4248 
4249 		for (i = 0; i < ARRAY_SIZE(new_count); i++)
4250 			new_count[i] = ctx->iowq_limits[i];
4251 		/* ignore errors, it always returns zero anyway */
4252 		(void)io_wq_max_workers(tctx->io_wq, new_count);
4253 	}
4254 	return 0;
4255 err:
4256 	if (sqd) {
4257 		mutex_unlock(&sqd->lock);
4258 		io_put_sq_data(sqd);
4259 	}
4260 	return ret;
4261 }
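
/*
 * Userspace sketch (illustrative): capping io-wq at 4 bounded and 8
 * unbounded workers. Per the logic above, a zero entry leaves that limit
 * unchanged, and the array holds the previous limits on return.
 *
 *	__u32 counts[2] = { 4, 8 };
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
 */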
4262 
4263 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
4264 			       void __user *arg, unsigned nr_args)
4265 	__releases(ctx->uring_lock)
4266 	__acquires(ctx->uring_lock)
4267 {
4268 	int ret;
4269 
4270 	/*
4271 	 * We don't quiesce the refs for register anymore and so it can't be
4272 	 * dying as we're holding a file ref here.
4273 	 */
4274 	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
4275 		return -ENXIO;
4276 
4277 	if (ctx->submitter_task && ctx->submitter_task != current)
4278 		return -EEXIST;
4279 
4280 	if (ctx->restricted) {
4281 		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
4282 		if (!test_bit(opcode, ctx->restrictions.register_op))
4283 			return -EACCES;
4284 	}
4285 
4286 	switch (opcode) {
4287 	case IORING_REGISTER_BUFFERS:
4288 		ret = -EFAULT;
4289 		if (!arg)
4290 			break;
4291 		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
4292 		break;
4293 	case IORING_UNREGISTER_BUFFERS:
4294 		ret = -EINVAL;
4295 		if (arg || nr_args)
4296 			break;
4297 		ret = io_sqe_buffers_unregister(ctx);
4298 		break;
4299 	case IORING_REGISTER_FILES:
4300 		ret = -EFAULT;
4301 		if (!arg)
4302 			break;
4303 		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
4304 		break;
4305 	case IORING_UNREGISTER_FILES:
4306 		ret = -EINVAL;
4307 		if (arg || nr_args)
4308 			break;
4309 		ret = io_sqe_files_unregister(ctx);
4310 		break;
4311 	case IORING_REGISTER_FILES_UPDATE:
4312 		ret = io_register_files_update(ctx, arg, nr_args);
4313 		break;
4314 	case IORING_REGISTER_EVENTFD:
4315 		ret = -EINVAL;
4316 		if (nr_args != 1)
4317 			break;
4318 		ret = io_eventfd_register(ctx, arg, 0);
4319 		break;
4320 	case IORING_REGISTER_EVENTFD_ASYNC:
4321 		ret = -EINVAL;
4322 		if (nr_args != 1)
4323 			break;
4324 		ret = io_eventfd_register(ctx, arg, 1);
4325 		break;
4326 	case IORING_UNREGISTER_EVENTFD:
4327 		ret = -EINVAL;
4328 		if (arg || nr_args)
4329 			break;
4330 		ret = io_eventfd_unregister(ctx);
4331 		break;
4332 	case IORING_REGISTER_PROBE:
4333 		ret = -EINVAL;
4334 		if (!arg || nr_args > 256)
4335 			break;
4336 		ret = io_probe(ctx, arg, nr_args);
4337 		break;
4338 	case IORING_REGISTER_PERSONALITY:
4339 		ret = -EINVAL;
4340 		if (arg || nr_args)
4341 			break;
4342 		ret = io_register_personality(ctx);
4343 		break;
4344 	case IORING_UNREGISTER_PERSONALITY:
4345 		ret = -EINVAL;
4346 		if (arg)
4347 			break;
4348 		ret = io_unregister_personality(ctx, nr_args);
4349 		break;
4350 	case IORING_REGISTER_ENABLE_RINGS:
4351 		ret = -EINVAL;
4352 		if (arg || nr_args)
4353 			break;
4354 		ret = io_register_enable_rings(ctx);
4355 		break;
4356 	case IORING_REGISTER_RESTRICTIONS:
4357 		ret = io_register_restrictions(ctx, arg, nr_args);
4358 		break;
4359 	case IORING_REGISTER_FILES2:
4360 		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
4361 		break;
4362 	case IORING_REGISTER_FILES_UPDATE2:
4363 		ret = io_register_rsrc_update(ctx, arg, nr_args,
4364 					      IORING_RSRC_FILE);
4365 		break;
4366 	case IORING_REGISTER_BUFFERS2:
4367 		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
4368 		break;
4369 	case IORING_REGISTER_BUFFERS_UPDATE:
4370 		ret = io_register_rsrc_update(ctx, arg, nr_args,
4371 					      IORING_RSRC_BUFFER);
4372 		break;
4373 	case IORING_REGISTER_IOWQ_AFF:
4374 		ret = -EINVAL;
4375 		if (!arg || !nr_args)
4376 			break;
4377 		ret = io_register_iowq_aff(ctx, arg, nr_args);
4378 		break;
4379 	case IORING_UNREGISTER_IOWQ_AFF:
4380 		ret = -EINVAL;
4381 		if (arg || nr_args)
4382 			break;
4383 		ret = io_unregister_iowq_aff(ctx);
4384 		break;
4385 	case IORING_REGISTER_IOWQ_MAX_WORKERS:
4386 		ret = -EINVAL;
4387 		if (!arg || nr_args != 2)
4388 			break;
4389 		ret = io_register_iowq_max_workers(ctx, arg);
4390 		break;
4391 	case IORING_REGISTER_RING_FDS:
4392 		ret = io_ringfd_register(ctx, arg, nr_args);
4393 		break;
4394 	case IORING_UNREGISTER_RING_FDS:
4395 		ret = io_ringfd_unregister(ctx, arg, nr_args);
4396 		break;
4397 	case IORING_REGISTER_PBUF_RING:
4398 		ret = -EINVAL;
4399 		if (!arg || nr_args != 1)
4400 			break;
4401 		ret = io_register_pbuf_ring(ctx, arg);
4402 		break;
4403 	case IORING_UNREGISTER_PBUF_RING:
4404 		ret = -EINVAL;
4405 		if (!arg || nr_args != 1)
4406 			break;
4407 		ret = io_unregister_pbuf_ring(ctx, arg);
4408 		break;
4409 	case IORING_REGISTER_SYNC_CANCEL:
4410 		ret = -EINVAL;
4411 		if (!arg || nr_args != 1)
4412 			break;
4413 		ret = io_sync_cancel(ctx, arg);
4414 		break;
4415 	case IORING_REGISTER_FILE_ALLOC_RANGE:
4416 		ret = -EINVAL;
4417 		if (!arg || nr_args)
4418 			break;
4419 		ret = io_register_file_alloc_range(ctx, arg);
4420 		break;
4421 	default:
4422 		ret = -EINVAL;
4423 		break;
4424 	}
4425 
4426 	return ret;
4427 }
4428 
4429 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
4430 		void __user *, arg, unsigned int, nr_args)
4431 {
4432 	struct io_ring_ctx *ctx;
4433 	long ret = -EBADF;
4434 	struct fd f;
4435 	bool use_registered_ring;
4436 
4437 	use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
4438 	opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;
4439 
4440 	if (opcode >= IORING_REGISTER_LAST)
4441 		return -EINVAL;
4442 
4443 	if (use_registered_ring) {
4444 		/*
4445 		 * Ring fd has been registered via IORING_REGISTER_RING_FDS; we
4446 		 * need only dereference our task-private array to find it.
4447 		 */
4448 		struct io_uring_task *tctx = current->io_uring;
4449 
4450 		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
4451 			return -EINVAL;
4452 		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
4453 		f.file = tctx->registered_rings[fd];
4454 		f.flags = 0;
4455 		if (unlikely(!f.file))
4456 			return -EBADF;
4457 	} else {
4458 		f = fdget(fd);
4459 		if (unlikely(!f.file))
4460 			return -EBADF;
4461 		ret = -EOPNOTSUPP;
4462 		if (!io_is_uring_fops(f.file))
4463 			goto out_fput;
4464 	}
4465 
4466 	ctx = f.file->private_data;
4467 
4468 	mutex_lock(&ctx->uring_lock);
4469 	ret = __io_uring_register(ctx, opcode, arg, nr_args);
4470 	mutex_unlock(&ctx->uring_lock);
4471 	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
4472 out_fput:
4473 	fdput(f);
4474 	return ret;
4475 }
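
/*
 * Userspace sketch (illustrative): registering the ring fd itself so
 * that later register calls can sidestep fdget(), per the
 * IORING_REGISTER_USE_REGISTERED_RING path above. An offset of -1U asks
 * the kernel to pick a free slot; the chosen index is written back on
 * success and is then passed in place of the fd.
 *
 *	struct io_uring_rsrc_update reg = {
 *		.offset = -1U,
 *		.data = ring_fd,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_RING_FDS,
 *		&reg, 1);
 *	syscall(__NR_io_uring_register, reg.offset,
 *		IORING_REGISTER_ENABLE_RINGS |
 *		IORING_REGISTER_USE_REGISTERED_RING, NULL, 0);
 */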
4476 
4477 static int __init io_uring_init(void)
4478 {
4479 #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
4480 	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
4481 	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
4482 } while (0)
4483 
4484 #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
4485 	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
4486 #define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
4487 	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
4488 	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
4489 	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
4490 	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
4491 	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
4492 	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
4493 	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
4494 	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
4495 	BUILD_BUG_SQE_ELEM(8,  __u32,  cmd_op);
4496 	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
4497 	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
4498 	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
4499 	BUILD_BUG_SQE_ELEM(24, __u32,  len);
4500 	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
4501 	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
4502 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
4503 	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
4504 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
4505 	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
4506 	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
4507 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
4508 	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
4509 	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
4510 	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
4511 	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
4512 	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
4513 	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
4514 	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
4515 	BUILD_BUG_SQE_ELEM(28, __u32,  rename_flags);
4516 	BUILD_BUG_SQE_ELEM(28, __u32,  unlink_flags);
4517 	BUILD_BUG_SQE_ELEM(28, __u32,  hardlink_flags);
4518 	BUILD_BUG_SQE_ELEM(28, __u32,  xattr_flags);
4519 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_ring_flags);
4520 	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
4521 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
4522 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
4523 	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
4524 	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
4525 	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
4526 	BUILD_BUG_SQE_ELEM(44, __u16,  addr_len);
4527 	BUILD_BUG_SQE_ELEM(46, __u16,  __pad3[0]);
4528 	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
4529 	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
4530 	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);
4531 
4532 	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
4533 		     sizeof(struct io_uring_rsrc_update));
4534 	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
4535 		     sizeof(struct io_uring_rsrc_update2));
4536 
4537 	/* ->buf_index is u16 */
4538 	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
4539 	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
4540 		     offsetof(struct io_uring_buf_ring, tail));
4541 
4542 	/* should fit into one byte */
4543 	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
4544 	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
4545 	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
4546 
4547 	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
4548 
4549 	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
4550 
4551 	io_uring_optable_init();
4552 
4553 	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
4554 				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
4555 	return 0;
4556 };
4557 __initcall(io_uring_init);
4558