xref: /openbmc/linux/io_uring/io_uring.h (revision e0486f3f)
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
	 * are set to indicate to the poll runner that multishot should be
	 * removed and the result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

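/*
 * Illustrative sketch (not part of this header): a typical opcode issue
 * handler either fills in the request's result with io_req_set_res() and
 * returns IOU_OK, or returns IOU_ISSUE_SKIP_COMPLETE when the completion
 * is posted elsewhere (task_work, poll retry, io-wq). The handler and the
 * do_foo() helper below are hypothetical:
 *
 *	int io_foo_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int ret = do_foo(req);		// hypothetical helper
 *
 *		if (ret == -EAGAIN)
 *			return -EAGAIN;		// punted and retried later
 *		io_req_set_res(req, ret, 0);
 *		return IOU_OK;			// core posts the CQE
 *	}
 */
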
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(void);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

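/*
 * Illustrative sketch (not part of this header): io_for_each_link() visits
 * a request and every request linked behind it via ->link, e.g. when
 * failing an entire chain (the need_cancel() predicate is hypothetical):
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, head) {
 *		if (need_cancel(cur))
 *			req_set_fail(cur);
 *	}
 */
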
static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

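/*
 * Illustrative sketch (not part of this header): posting a request's CQE
 * directly roughly follows the pattern below, assuming io_cq_unlock_post()
 * commits the tail, drops the lock and wakes CQ waiters (as its io_uring.c
 * counterpart does at this revision):
 *
 *	io_cq_lock(ctx);
 *	__io_fill_cqe_req(ctx, req);	// overflow is accounted internally
 *	io_cq_unlock_post(ctx);		// commit tail, unlock, wake waiters
 */
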
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. The same is true for the SQPOLL
	 * offload. The only exception is when we've detached the request and
	 * issue it from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

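/*
 * Illustrative sketch (not part of this header): issue paths that touch
 * ring-private state bracket the access with the conditional lock helpers,
 * so the same code works inline (lock already held) and from io-wq. The
 * update_fixed_slot() helper below is hypothetical:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	ret = update_fixed_slot(ctx, slot);
 *	io_ring_submit_unlock(ctx, issue_flags);
 */
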
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

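/*
 * Illustrative sketch (not part of this header): a submission loop (the
 * SQPOLL thread is the obvious in-kernel user) pairs io_sqring_entries()
 * with io_submit_sqes() roughly like this:
 *
 *	unsigned int to_submit = io_sqring_entries(ctx);
 *
 *	if (to_submit) {
 *		mutex_lock(&ctx->uring_lock);
 *		io_submit_sqes(ctx, to_submit);
 *		mutex_unlock(&ctx->uring_lock);
 *	}
 */
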
static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

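/*
 * Illustrative sketch (not part of this header): callers that may block on
 * ring progress typically drain pending task_work before sleeping, since
 * that work can itself post the completions being waited on. 'done' is a
 * hypothetical condition and the loop is deliberately simplified (real
 * waiters use prepare_to_wait() and friends):
 *
 *	do {
 *		if (io_run_task_work())
 *			continue;	// made progress, re-check condition
 *		schedule();
 *	} while (!done);
 */
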
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

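/*
 * Illustrative sketch (not part of this header): task_work handlers carry a
 * 'locked' token so the uring_lock is taken at most once across a batch of
 * callbacks. The handler name below is hypothetical:
 *
 *	static void io_foo_tw(struct io_kiocb *req, bool *locked)
 *	{
 *		io_tw_lock(req->ctx, locked);
 *		io_req_task_complete(req, locked);
 *	}
 */
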
/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

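/*
 * Illustrative sketch (not part of this header): completion paths pick the
 * deferred variant only when the caller is known to hold the uring_lock,
 * falling back to the locked post otherwise:
 *
 *	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
 *		io_req_complete_defer(req);
 *	else
 *		io_req_complete_post(req);
 */
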
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

#endif