/* xref: /openbmc/linux/io_uring/io_uring.h (revision 6567506b) */
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
	 * are set. Indicates to the poll runner that multishot should be
	 * removed and the result stored in req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

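/*
 * Illustrative sketch only (io_example_issue() and do_example_work() are
 * hypothetical, not part of this header): a typical opcode issue handler
 * stores its result with io_req_set_res() and returns IOU_OK so the core
 * posts the CQE, while IOU_ISSUE_SKIP_COMPLETE and IOU_STOP_MULTISHOT
 * cover the cases where the completion is posted elsewhere or a multishot
 * poll request has to be terminated.
 *
 *	static int io_example_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int ret = do_example_work(req);
 *
 *		io_req_set_res(req, ret, 0);
 *		return IOU_OK;
 *	}
 */
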
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

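/*
 * Illustrative sketch: io_for_each_link() walks a request and every
 * request linked behind it via ->link, e.g. counting the chain length:
 *
 *	struct io_kiocb *cur;
 *	unsigned int nr = 0;
 *
 *	io_for_each_link(cur, head)
 *		nr++;
 */
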
static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

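/*
 * Illustrative sketch (assumed caller-side flow, simplified): completions
 * are filled under the completion lock, and io_cq_unlock_post() publishes
 * the new tail and wakes CQ waiters:
 *
 *	io_cq_lock(ctx);
 *	if (!(req->flags & REQ_F_CQE_SKIP))
 *		__io_fill_cqe_req(ctx, req);
 *	io_cq_unlock_post(ctx);
 */
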
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

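/*
 * Illustrative sketch: handlers that touch state normally protected by
 * the submission path (fixed file or buffer tables, for instance)
 * bracket the access with these helpers, so the mutex is only taken
 * when issuing from an unlocked (async worker) context:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */
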
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

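/*
 * Illustrative sketch (simplified; the real submission paths also hold
 * the uring_lock): the number of available SQEs is snapshotted with
 * io_sqring_entries() and handed to io_submit_sqes():
 *
 *	unsigned int to_submit = io_sqring_entries(ctx);
 *
 *	if (to_submit)
 *		io_submit_sqes(ctx, to_submit);
 */
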
static inline int io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
		!wq_list_empty(&ctx->work_llist);
}

static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
	int ret = 0;
	int ret2;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		ret = io_run_local_work(ctx);

	/* want to run this after in case more is added */
	ret2 = io_run_task_work();

	/*
	 * Propagate an error in preference to the count of tasks run, but
	 * still make sure task work is run if requested.
	 */
	if (ret >= 0)
		ret += ret2;

	return ret;
}

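/*
 * Illustrative sketch: wait paths check io_task_work_pending() and flush
 * both local (IORING_SETUP_DEFER_TASKRUN) and generic task_work before
 * deciding whether to sleep, roughly:
 *
 *	if (io_task_work_pending(ctx))
 *		io_run_task_work_ctx(ctx);
 */
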
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

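/*
 * Illustrative sketch: a completion path that may run either with the
 * lock held or from an unlocked context picks between deferred and
 * immediate posting based on the issue flags:
 *
 *	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
 *		io_req_complete_defer(req);
 *	else
 *		io_req_complete_post(req);
 */
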
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}

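/*
 * Illustrative sketch: callers top up the request cache first and only
 * then pull a request off the free list:
 *
 *	if (unlikely(!io_alloc_req_refill(ctx)))
 *		return -ENOMEM;
 *	req = io_alloc_req(ctx);
 */
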
static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

#endif