xref: /openbmc/linux/io_uring/io_uring.h (revision e52d2e58)
1de23077eSJens Axboe #ifndef IOU_CORE_H
2de23077eSJens Axboe #define IOU_CORE_H
3de23077eSJens Axboe 
4de23077eSJens Axboe #include <linux/errno.h>
5cd40cae2SJens Axboe #include <linux/lockdep.h>
6ab1c84d8SPavel Begunkov #include <linux/io_uring_types.h>
7ab1c84d8SPavel Begunkov #include "io-wq.h"
8a6b21fbbSPavel Begunkov #include "slist.h"
9ab1c84d8SPavel Begunkov #include "filetable.h"
10de23077eSJens Axboe 
11f3b44f92SJens Axboe #ifndef CREATE_TRACE_POINTS
12f3b44f92SJens Axboe #include <trace/events/io_uring.h>
13f3b44f92SJens Axboe #endif
14f3b44f92SJens Axboe 
1597b388d7SJens Axboe enum {
1697b388d7SJens Axboe 	IOU_OK			= 0,
1797b388d7SJens Axboe 	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
18114eccdfSDylan Yudaken 
19114eccdfSDylan Yudaken 	/*
2091482864SPavel Begunkov 	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
2191482864SPavel Begunkov 	 * to the poll runner that multishot should be removed and that
22114eccdfSDylan Yudaken 	 * the result has been set in req->cqe.res.
23114eccdfSDylan Yudaken 	 */
24114eccdfSDylan Yudaken 	IOU_STOP_MULTISHOT	= -ECANCELED,
2597b388d7SJens Axboe };
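
/*
 * Illustrative only (not part of this header): roughly how an opcode's
 * issue handler is expected to use the codes above. "do_foo" and the
 * handler itself are hypothetical; the point is that IOU_OK lets the core
 * post req->cqe.res, IOU_ISSUE_SKIP_COMPLETE means completion happens
 * elsewhere (poll/task_work), and IOU_STOP_MULTISHOT ends a multishot
 * request whose result was already stored with io_req_set_res().
 *
 *	static int io_foo_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int res = do_foo(req);
 *
 *		io_req_set_res(req, res, 0);
 *		if ((issue_flags & IO_URING_F_MULTISHOT) && res < 0)
 *			return IOU_STOP_MULTISHOT;
 *		return IOU_OK;
 *	}
 */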
2697b388d7SJens Axboe 
27aa1df3a3SPavel Begunkov struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
2868494a65SPavel Begunkov bool io_req_cqe_overflow(struct io_kiocb *req);
29c0e0d6baSDylan Yudaken int io_run_task_work_sig(struct io_ring_ctx *ctx);
30b3026767SDylan Yudaken int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
31c0e0d6baSDylan Yudaken int io_run_local_work(struct io_ring_ctx *ctx);
329046c641SPavel Begunkov void io_req_complete_failed(struct io_kiocb *req, s32 res);
339046c641SPavel Begunkov void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
349046c641SPavel Begunkov void io_req_complete_post(struct io_kiocb *req);
359046c641SPavel Begunkov void __io_req_complete_post(struct io_kiocb *req);
3652120f0fSDylan Yudaken bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
3752120f0fSDylan Yudaken 		     bool allow_overflow);
38eb42cebbSPavel Begunkov bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
39eb42cebbSPavel Begunkov 		     bool allow_overflow);
409046c641SPavel Begunkov void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
419046c641SPavel Begunkov 
429046c641SPavel Begunkov struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
439046c641SPavel Begunkov 
449046c641SPavel Begunkov struct file *io_file_get_normal(struct io_kiocb *req, int fd);
459046c641SPavel Begunkov struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
469046c641SPavel Begunkov 			       unsigned issue_flags);
479046c641SPavel Begunkov 
48f6b543fdSJens Axboe static inline bool io_req_ffs_set(struct io_kiocb *req)
49f6b543fdSJens Axboe {
50f6b543fdSJens Axboe 	return req->flags & REQ_F_FIXED_FILE;
51f6b543fdSJens Axboe }
52f6b543fdSJens Axboe 
53*e52d2e58SPavel Begunkov void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
549046c641SPavel Begunkov bool io_is_uring_fops(struct file *file);
559046c641SPavel Begunkov bool io_alloc_async_data(struct io_kiocb *req);
569046c641SPavel Begunkov void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
579046c641SPavel Begunkov void io_req_task_queue(struct io_kiocb *req);
589046c641SPavel Begunkov void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
599046c641SPavel Begunkov void io_req_task_complete(struct io_kiocb *req, bool *locked);
609046c641SPavel Begunkov void io_req_task_queue_fail(struct io_kiocb *req, int ret);
619046c641SPavel Begunkov void io_req_task_submit(struct io_kiocb *req, bool *locked);
629046c641SPavel Begunkov void tctx_task_work(struct callback_head *cb);
639046c641SPavel Begunkov __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
649046c641SPavel Begunkov int io_uring_alloc_task_context(struct task_struct *task,
659046c641SPavel Begunkov 				struct io_ring_ctx *ctx);
669046c641SPavel Begunkov 
679046c641SPavel Begunkov int io_poll_issue(struct io_kiocb *req, bool *locked);
689046c641SPavel Begunkov int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
699046c641SPavel Begunkov int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
709046c641SPavel Begunkov void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
719046c641SPavel Begunkov int io_req_prep_async(struct io_kiocb *req);
729046c641SPavel Begunkov 
739046c641SPavel Begunkov struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
749046c641SPavel Begunkov void io_wq_submit_work(struct io_wq_work *work);
759046c641SPavel Begunkov 
769046c641SPavel Begunkov void io_free_req(struct io_kiocb *req);
779046c641SPavel Begunkov void io_queue_next(struct io_kiocb *req);
78e70cb608SPavel Begunkov void __io_put_task(struct task_struct *task, int nr);
7963809137SPavel Begunkov void io_task_refs_refill(struct io_uring_task *tctx);
80bd1a3783SPavel Begunkov bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
819046c641SPavel Begunkov 
829046c641SPavel Begunkov bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
839046c641SPavel Begunkov 			bool cancel_all);
849046c641SPavel Begunkov 
85*e52d2e58SPavel Begunkov static inline void io_req_task_work_add(struct io_kiocb *req)
86*e52d2e58SPavel Begunkov {
87*e52d2e58SPavel Begunkov 	__io_req_task_work_add(req, true);
88*e52d2e58SPavel Begunkov }
89*e52d2e58SPavel Begunkov 
909046c641SPavel Begunkov #define io_for_each_link(pos, head) \
919046c641SPavel Begunkov 	for (pos = (head); pos; pos = pos->link)
92f3b44f92SJens Axboe 
9325399321SPavel Begunkov static inline void io_cq_lock(struct io_ring_ctx *ctx)
9425399321SPavel Begunkov 	__acquires(ctx->completion_lock)
9525399321SPavel Begunkov {
9625399321SPavel Begunkov 	spin_lock(&ctx->completion_lock);
9725399321SPavel Begunkov }
9825399321SPavel Begunkov 
9925399321SPavel Begunkov void io_cq_unlock_post(struct io_ring_ctx *ctx);
10025399321SPavel Begunkov 
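/*
 * ctx->cqe_cached/->cqe_sentinel describe a run of CQEs that has already
 * been reserved, so the common case below is a plain pointer bump; on a
 * CQE32 ring the cached pointer advances by two slots because big CQEs
 * occupy two standard entries. Only once the cached run is used up does
 * __io_get_cqe() refill from the CQ ring proper, and its "overflow"
 * argument says whether handing out a CQE is still allowed while
 * overflowed completions are pending.
 */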
101aa1df3a3SPavel Begunkov static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
102aa1df3a3SPavel Begunkov 						       bool overflow)
103f3b44f92SJens Axboe {
104f3b44f92SJens Axboe 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
105f3b44f92SJens Axboe 		struct io_uring_cqe *cqe = ctx->cqe_cached;
106f3b44f92SJens Axboe 
107f3b44f92SJens Axboe 		ctx->cached_cq_tail++;
108f3b44f92SJens Axboe 		ctx->cqe_cached++;
109b3659a65SPavel Begunkov 		if (ctx->flags & IORING_SETUP_CQE32)
110b3659a65SPavel Begunkov 			ctx->cqe_cached++;
111f3b44f92SJens Axboe 		return cqe;
112f3b44f92SJens Axboe 	}
113f3b44f92SJens Axboe 
114aa1df3a3SPavel Begunkov 	return __io_get_cqe(ctx, overflow);
115aa1df3a3SPavel Begunkov }
116aa1df3a3SPavel Begunkov 
117aa1df3a3SPavel Begunkov static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
118aa1df3a3SPavel Begunkov {
119aa1df3a3SPavel Begunkov 	return io_get_cqe_overflow(ctx, false);
120f3b44f92SJens Axboe }
121f3b44f92SJens Axboe 
122f3b44f92SJens Axboe static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
123f3b44f92SJens Axboe 				     struct io_kiocb *req)
124f3b44f92SJens Axboe {
125f3b44f92SJens Axboe 	struct io_uring_cqe *cqe;
126f3b44f92SJens Axboe 
127f3b44f92SJens Axboe 	/*
128f3b44f92SJens Axboe 	 * If we can't get a CQ entry, userspace has overflowed the CQ
129f3b44f92SJens Axboe 	 * ring (by quite a lot) by not reaping completions; hand the
130f3b44f92SJens Axboe 	 * CQE off to the overflow handling.
131f3b44f92SJens Axboe 	 */
132f3b44f92SJens Axboe 	cqe = io_get_cqe(ctx);
133e8c328c3SPavel Begunkov 	if (unlikely(!cqe))
134e8c328c3SPavel Begunkov 		return io_req_cqe_overflow(req);
135e0486f3fSDylan Yudaken 
136e0486f3fSDylan Yudaken 	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
137e0486f3fSDylan Yudaken 				req->cqe.res, req->cqe.flags,
138e0486f3fSDylan Yudaken 				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
139e0486f3fSDylan Yudaken 				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
140e0486f3fSDylan Yudaken 
141f3b44f92SJens Axboe 	memcpy(cqe, &req->cqe, sizeof(*cqe));
142e8c328c3SPavel Begunkov 
143e8c328c3SPavel Begunkov 	if (ctx->flags & IORING_SETUP_CQE32) {
144f3b44f92SJens Axboe 		u64 extra1 = 0, extra2 = 0;
145f3b44f92SJens Axboe 
146f3b44f92SJens Axboe 		if (req->flags & REQ_F_CQE32_INIT) {
147f3b44f92SJens Axboe 			extra1 = req->extra1;
148f3b44f92SJens Axboe 			extra2 = req->extra2;
149f3b44f92SJens Axboe 		}
150f3b44f92SJens Axboe 
151f3b44f92SJens Axboe 		WRITE_ONCE(cqe->big_cqe[0], extra1);
152f3b44f92SJens Axboe 		WRITE_ONCE(cqe->big_cqe[1], extra2);
153e8c328c3SPavel Begunkov 	}
154f3b44f92SJens Axboe 	return true;
155f3b44f92SJens Axboe }
156f3b44f92SJens Axboe 
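/*
 * A failed request must always post a CQE, even if the submitter asked to
 * skip it: REQ_F_CQE_SKIP is cleared here and remembered as
 * REQ_F_SKIP_LINK_CQES so the remainder of the link keeps the requested
 * skip behaviour.
 */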
157531113bbSJens Axboe static inline void req_set_fail(struct io_kiocb *req)
158531113bbSJens Axboe {
159531113bbSJens Axboe 	req->flags |= REQ_F_FAIL;
160531113bbSJens Axboe 	if (req->flags & REQ_F_CQE_SKIP) {
161531113bbSJens Axboe 		req->flags &= ~REQ_F_CQE_SKIP;
162531113bbSJens Axboe 		req->flags |= REQ_F_SKIP_LINK_CQES;
163531113bbSJens Axboe 	}
164531113bbSJens Axboe }
165531113bbSJens Axboe 
166de23077eSJens Axboe static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
167de23077eSJens Axboe {
168de23077eSJens Axboe 	req->cqe.res = res;
169de23077eSJens Axboe 	req->cqe.flags = cflags;
170de23077eSJens Axboe }
171de23077eSJens Axboe 
17299f15d8dSJens Axboe static inline bool req_has_async_data(struct io_kiocb *req)
17399f15d8dSJens Axboe {
17499f15d8dSJens Axboe 	return req->flags & REQ_F_ASYNC_DATA;
17599f15d8dSJens Axboe }
17699f15d8dSJens Axboe 
177531113bbSJens Axboe static inline void io_put_file(struct file *file)
178531113bbSJens Axboe {
179531113bbSJens Axboe 	if (file)
180531113bbSJens Axboe 		fput(file);
181531113bbSJens Axboe }
182531113bbSJens Axboe 
183cd40cae2SJens Axboe static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
184cd40cae2SJens Axboe 					 unsigned issue_flags)
185cd40cae2SJens Axboe {
186cd40cae2SJens Axboe 	lockdep_assert_held(&ctx->uring_lock);
187cd40cae2SJens Axboe 	if (issue_flags & IO_URING_F_UNLOCKED)
188cd40cae2SJens Axboe 		mutex_unlock(&ctx->uring_lock);
189cd40cae2SJens Axboe }
190cd40cae2SJens Axboe 
191cd40cae2SJens Axboe static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
192cd40cae2SJens Axboe 				       unsigned issue_flags)
193cd40cae2SJens Axboe {
194cd40cae2SJens Axboe 	/*
195cd40cae2SJens Axboe 	 * "Normal" inline submissions always hold the uring_lock, since we
196cd40cae2SJens Axboe 	 * grab it from the system call. Same is true for the SQPOLL offload.
197cd40cae2SJens Axboe 	 * The only exception is when we've detached the request and issue it
198cd40cae2SJens Axboe 	 * from an async worker thread; grab the lock in that case.
199cd40cae2SJens Axboe 	 */
200cd40cae2SJens Axboe 	if (issue_flags & IO_URING_F_UNLOCKED)
201cd40cae2SJens Axboe 		mutex_lock(&ctx->uring_lock);
202cd40cae2SJens Axboe 	lockdep_assert_held(&ctx->uring_lock);
203cd40cae2SJens Axboe }
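
/*
 * Illustrative pattern (the handler and do_locked_update() are made up):
 * an opcode that touches ->uring_lock protected state passes the same
 * issue_flags to both helpers, so the mutex is only taken and dropped when
 * running unlocked, i.e. from an io-wq worker.
 *
 *	static int io_foo_update(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		struct io_ring_ctx *ctx = req->ctx;
 *		int ret;
 *
 *		io_ring_submit_lock(ctx, issue_flags);
 *		ret = do_locked_update(ctx);
 *		io_ring_submit_unlock(ctx, issue_flags);
 *		return ret;
 *	}
 */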
204cd40cae2SJens Axboe 
205f9ead18cSJens Axboe static inline void io_commit_cqring(struct io_ring_ctx *ctx)
206f9ead18cSJens Axboe {
207f9ead18cSJens Axboe 	/* order cqe stores with ring update */
208f9ead18cSJens Axboe 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
209f9ead18cSJens Axboe }
210f9ead18cSJens Axboe 
211fc86f9d3SPavel Begunkov /* requires a prior smp_mb(), see wq_has_sleeper() */
212fc86f9d3SPavel Begunkov static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
213f3b44f92SJens Axboe {
214f3b44f92SJens Axboe 	/*
215f3b44f92SJens Axboe 	 * wake_up_all() may seem excessive, but io_wake_function() and
216f3b44f92SJens Axboe 	 * io_should_wake() handle the termination of the loop and only
217f3b44f92SJens Axboe 	 * wake as many waiters as we need to.
218f3b44f92SJens Axboe 	 */
219fc86f9d3SPavel Begunkov 	if (waitqueue_active(&ctx->cq_wait))
220f3b44f92SJens Axboe 		wake_up_all(&ctx->cq_wait);
221f3b44f92SJens Axboe }
222f3b44f92SJens Axboe 
223fc86f9d3SPavel Begunkov static inline void io_cqring_wake(struct io_ring_ctx *ctx)
224fc86f9d3SPavel Begunkov {
225fc86f9d3SPavel Begunkov 	smp_mb();
226fc86f9d3SPavel Begunkov 	__io_cqring_wake(ctx);
227fc86f9d3SPavel Begunkov }
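
/*
 * A completion-side flush roughly follows this order (simplified sketch,
 * not a verbatim copy of io_cq_unlock_post()): publish the CQEs, drop the
 * lock, flush eventfd/deferred work, then wake waiters. io_cqring_wake()
 * provides the full barrier itself; use __io_cqring_wake() only when the
 * caller has already issued one.
 *
 *	io_commit_cqring(ctx);
 *	spin_unlock(&ctx->completion_lock);
 *	io_commit_cqring_flush(ctx);
 *	io_cqring_wake(ctx);
 */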
228fc86f9d3SPavel Begunkov 
22917437f31SJens Axboe static inline bool io_sqring_full(struct io_ring_ctx *ctx)
23017437f31SJens Axboe {
23117437f31SJens Axboe 	struct io_rings *r = ctx->rings;
23217437f31SJens Axboe 
23317437f31SJens Axboe 	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
23417437f31SJens Axboe }
23517437f31SJens Axboe 
23617437f31SJens Axboe static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
23717437f31SJens Axboe {
23817437f31SJens Axboe 	struct io_rings *rings = ctx->rings;
23917437f31SJens Axboe 
24017437f31SJens Axboe 	/* make sure SQ entry isn't read before tail */
24117437f31SJens Axboe 	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
24217437f31SJens Axboe }
24317437f31SJens Axboe 
244c0e0d6baSDylan Yudaken static inline int io_run_task_work(void)
24517437f31SJens Axboe {
24646a525e1SJens Axboe 	if (task_work_pending(current)) {
24746a525e1SJens Axboe 		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
24817437f31SJens Axboe 			clear_notify_signal();
24946a525e1SJens Axboe 		__set_current_state(TASK_RUNNING);
25017437f31SJens Axboe 		task_work_run();
251c0e0d6baSDylan Yudaken 		return 1;
25217437f31SJens Axboe 	}
25317437f31SJens Axboe 
254c0e0d6baSDylan Yudaken 	return 0;
255c0e0d6baSDylan Yudaken }
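
/*
 * Rough sketch (not taken from this file) of how a wait loop is expected
 * to use io_run_task_work(): drain pending task_work before sleeping,
 * since completions are frequently posted from task_work. done_waiting()
 * is a hypothetical predicate.
 *
 *	for (;;) {
 *		if (io_run_task_work())
 *			continue;
 *		if (done_waiting(ctx))
 *			break;
 *		schedule();
 *	}
 */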
256c0e0d6baSDylan Yudaken 
257dac6a0eaSJens Axboe static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
258dac6a0eaSJens Axboe {
259dac6a0eaSJens Axboe 	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
260dac6a0eaSJens Axboe 		!wq_list_empty(&ctx->work_llist);
261dac6a0eaSJens Axboe }
262dac6a0eaSJens Axboe 
263c0e0d6baSDylan Yudaken static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
264c0e0d6baSDylan Yudaken {
265c0e0d6baSDylan Yudaken 	int ret = 0;
266c0e0d6baSDylan Yudaken 	int ret2;
267c0e0d6baSDylan Yudaken 
268c0e0d6baSDylan Yudaken 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
269c0e0d6baSDylan Yudaken 		ret = io_run_local_work(ctx);
270c0e0d6baSDylan Yudaken 
271c0e0d6baSDylan Yudaken 	/* run this afterwards, in case more task work was added in the meantime */
272c0e0d6baSDylan Yudaken 	ret2 = io_run_task_work();
273c0e0d6baSDylan Yudaken 
274c0e0d6baSDylan Yudaken 	/* Propagate an error in preference to the count of tasks run,
275c0e0d6baSDylan Yudaken 	 * but make sure the task work itself is always run when requested.
276c0e0d6baSDylan Yudaken 	 */
277c0e0d6baSDylan Yudaken 	if (ret >= 0)
278c0e0d6baSDylan Yudaken 		ret += ret2;
279c0e0d6baSDylan Yudaken 
280c0e0d6baSDylan Yudaken 	return ret;
28117437f31SJens Axboe }
28217437f31SJens Axboe 
28344f87745SPavel Begunkov static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
28444f87745SPavel Begunkov {
285b3026767SDylan Yudaken 	bool locked;
286b3026767SDylan Yudaken 	int ret;
287b3026767SDylan Yudaken 
28844f87745SPavel Begunkov 	if (llist_empty(&ctx->work_llist))
28944f87745SPavel Begunkov 		return 0;
290b3026767SDylan Yudaken 
291b3026767SDylan Yudaken 	locked = true;
292b3026767SDylan Yudaken 	ret = __io_run_local_work(ctx, &locked);
293b3026767SDylan Yudaken 	/* shouldn't happen! */
294b3026767SDylan Yudaken 	if (WARN_ON_ONCE(!locked))
295b3026767SDylan Yudaken 		mutex_lock(&ctx->uring_lock);
296b3026767SDylan Yudaken 	return ret;
29744f87745SPavel Begunkov }
29844f87745SPavel Begunkov 
299aa1e90f6SPavel Begunkov static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
300aa1e90f6SPavel Begunkov {
301aa1e90f6SPavel Begunkov 	if (!*locked) {
302aa1e90f6SPavel Begunkov 		mutex_lock(&ctx->uring_lock);
303aa1e90f6SPavel Begunkov 		*locked = true;
304aa1e90f6SPavel Begunkov 	}
305aa1e90f6SPavel Begunkov }
306aa1e90f6SPavel Begunkov 
3079da070b1SPavel Begunkov /*
3089da070b1SPavel Begunkov  * Don't complete immediately but use deferred completion infrastructure.
3099da070b1SPavel Begunkov  * Protected by ->uring_lock and can only be used either with
3109da070b1SPavel Begunkov  * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
3119da070b1SPavel Begunkov  */
3129da070b1SPavel Begunkov static inline void io_req_complete_defer(struct io_kiocb *req)
3139da070b1SPavel Begunkov 	__must_hold(&req->ctx->uring_lock)
314aa1e90f6SPavel Begunkov {
315aa1e90f6SPavel Begunkov 	struct io_submit_state *state = &req->ctx->submit_state;
316aa1e90f6SPavel Begunkov 
3179da070b1SPavel Begunkov 	lockdep_assert_held(&req->ctx->uring_lock);
3189da070b1SPavel Begunkov 
319aa1e90f6SPavel Begunkov 	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
320aa1e90f6SPavel Begunkov }
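
/*
 * Illustrative completion snippet (handler code, not from this header):
 * pick the cheap deferred completion only when issue_flags say the caller
 * already owns ->uring_lock, otherwise fall back to posting directly.
 *
 *	io_req_set_res(req, res, cflags);
 *	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
 *		io_req_complete_defer(req);
 *	else
 *		io_req_complete_post(req);
 */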
321aa1e90f6SPavel Begunkov 
32246929b08SPavel Begunkov static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
32346929b08SPavel Begunkov {
32446929b08SPavel Begunkov 	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
32546929b08SPavel Begunkov 		__io_commit_cqring_flush(ctx);
32646929b08SPavel Begunkov }
32746929b08SPavel Begunkov 
328e70cb608SPavel Begunkov /* must be called shortly after putting a request */
329e70cb608SPavel Begunkov static inline void io_put_task(struct task_struct *task, int nr)
330e70cb608SPavel Begunkov {
331e70cb608SPavel Begunkov 	if (likely(task == current))
332e70cb608SPavel Begunkov 		task->io_uring->cached_refs += nr;
333e70cb608SPavel Begunkov 	else
334e70cb608SPavel Begunkov 		__io_put_task(task, nr);
335e70cb608SPavel Begunkov }
336e70cb608SPavel Begunkov 
33763809137SPavel Begunkov static inline void io_get_task_refs(int nr)
33863809137SPavel Begunkov {
33963809137SPavel Begunkov 	struct io_uring_task *tctx = current->io_uring;
34063809137SPavel Begunkov 
34163809137SPavel Begunkov 	tctx->cached_refs -= nr;
34263809137SPavel Begunkov 	if (unlikely(tctx->cached_refs < 0))
34363809137SPavel Begunkov 		io_task_refs_refill(tctx);
34463809137SPavel Begunkov }
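
/*
 * The two helpers above batch per-request task reference counting through
 * tctx->cached_refs: io_get_task_refs() hands refs out of the per-task
 * cache and only calls io_task_refs_refill() when it runs dry, while
 * io_put_task() returns refs to the cache when called from the owning task
 * and takes the slow __io_put_task() path otherwise.
 */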
34563809137SPavel Begunkov 
346bd1a3783SPavel Begunkov static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
347bd1a3783SPavel Begunkov {
348bd1a3783SPavel Begunkov 	return !ctx->submit_state.free_list.next;
349bd1a3783SPavel Begunkov }
350bd1a3783SPavel Begunkov 
351bd1a3783SPavel Begunkov static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
352bd1a3783SPavel Begunkov {
353bd1a3783SPavel Begunkov 	if (unlikely(io_req_cache_empty(ctx)))
354bd1a3783SPavel Begunkov 		return __io_alloc_req_refill(ctx);
355bd1a3783SPavel Begunkov 	return true;
356bd1a3783SPavel Begunkov }
357bd1a3783SPavel Begunkov 
358bd1a3783SPavel Begunkov static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
359bd1a3783SPavel Begunkov {
360bd1a3783SPavel Begunkov 	struct io_wq_work_node *node;
361bd1a3783SPavel Begunkov 
362bd1a3783SPavel Begunkov 	node = wq_stack_extract(&ctx->submit_state.free_list);
363bd1a3783SPavel Begunkov 	return container_of(node, struct io_kiocb, comp_list);
364bd1a3783SPavel Begunkov }
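
/*
 * Typical allocation pattern on the submission path (sketch only): top up
 * the request cache before taking an entry, since io_alloc_req() never
 * fails and simply pops the free list.
 *
 *	if (unlikely(!io_alloc_req_refill(ctx)))
 *		return -EAGAIN;
 *	req = io_alloc_req(ctx);
 */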
365bd1a3783SPavel Begunkov 
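/*
 * With IORING_SETUP_DEFER_TASKRUN only the ring's submitter task
 * (ctx->submitter_task) may run the deferred work, so helpers check this
 * before attempting to run task work on behalf of the ring; without the
 * flag any task is allowed to.
 */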
36676de6749SPavel Begunkov static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
36776de6749SPavel Begunkov {
3686567506bSPavel Begunkov 	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
3696567506bSPavel Begunkov 		      ctx->submitter_task == current);
37076de6749SPavel Begunkov }
37176de6749SPavel Begunkov 
372de23077eSJens Axboe #endif
373