#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
	 * are set to indicate to the poll runner that multishot should be
	 * removed and the result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
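
/*
 * Rough illustration of how these return codes are meant to be used by an
 * opcode issue handler (hypothetical handler, shown for documentation only;
 * the real consumers live in io_uring.c):
 *
 *	static int foo_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int ret = do_foo(req);
 *
 *		if (ret == -EIOCBQUEUED)
 *			return IOU_ISSUE_SKIP_COMPLETE; // CQE posted later
 *		io_req_set_res(req, ret, 0);
 *		return IOU_OK;			// core code posts the CQE
 *	}
 */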

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(void);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

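/* Walk a request and every request linked behind it (an IOSQE_IO_LINK chain). */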
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

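/*
 * Fast path for grabbing the next free CQE: hand out entries from the
 * currently cached CQ ring segment and fall back to __io_get_cqe() once the
 * cached range is exhausted (or on overflow).  With IORING_SETUP_CQE32 each
 * completion occupies two slots of the legacy-sized CQE array, hence the
 * extra increment below.
 */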
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

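/*
 * Copy the request's prepared CQE into the CQ ring.  If no CQE slot is
 * available the completion is pushed onto the overflow list instead; the
 * return value is true if the CQE was written or successfully queued for
 * overflow.
 */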
static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

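/*
 * Mark a request as failed.  A failed request always posts a CQE, so an
 * IOSQE_CQE_SKIP_SUCCESS request that fails has REQ_F_CQE_SKIP cleared here;
 * the skip is instead propagated to the rest of its link chain via
 * REQ_F_SKIP_LINK_CQES.
 */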
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
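
/*
 * The two helpers above let issue-side code touch uring_lock protected state
 * regardless of where it runs.  Rough usage sketch (hypothetical caller, for
 * illustration only):
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	// ... manipulate state protected by ctx->uring_lock ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */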

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}
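
/*
 * For reference, the completion side roughly strings these helpers together
 * as follows (a sketch of what io_cq_unlock_post() in io_uring.c does, not a
 * verbatim copy):
 *
 *	io_cq_lock(ctx);
 *	__io_fill_cqe_req(ctx, req);
 *	io_commit_cqring(ctx);
 *	spin_unlock(&ctx->completion_lock);
 *	io_commit_cqring_flush(ctx);
 *	io_cqring_wake(ctx);
 */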

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

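/*
 * Run pending task_work if we were notified via TIF_NOTIFY_SIGNAL.  Returns
 * true if a notification was pending (it is cleared and any queued task_work
 * is run), false otherwise.
 */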
static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

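/*
 * Post-commit flush work (flushing deferred timeouts, drained requests and
 * eventfd signalling) is only needed in rare cases, so the common path is a
 * cheap inline check before calling out to the slow path.
 */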
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

#endif