#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Valid only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT are
	 * set; tells the poll runner that multishot should be removed and
	 * that the result has already been set in req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
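
/*
 * Illustrative sketch only: an issue handler typically sets the result
 * with io_req_set_res() and returns IOU_OK so the core posts the CQE,
 * or returns IOU_ISSUE_SKIP_COMPLETE when the completion is posted
 * elsewhere (e.g. from poll/task_work). do_foo() is a hypothetical
 * helper, not something defined in this tree.
 *
 *	static int io_foo(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int ret = do_foo(req, issue_flags);
 *
 *		if (ret == -EIOCBQUEUED)
 *			return IOU_ISSUE_SKIP_COMPLETE;
 *		io_req_set_res(req, ret, 0);
 *		return IOU_OK;
 *	}
 */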

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)
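
/*
 * Illustrative sketch: walk a request's link chain, e.g. to fail every
 * request linked behind @head (usage assumed for illustration, not
 * taken from this file):
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, head)
 *		req_set_fail(cur);
 */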

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

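/*
 * Fast path: hand out the next CQE from the range the core has already
 * reserved on the CQ ring ([cqe_cached, cqe_sentinel)). With
 * IORING_SETUP_CQE32 each CQE occupies two slots, hence the second
 * increment. Falls back to __io_get_cqe() once the cache is exhausted.
 */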
static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
						       bool overflow)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

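/*
 * Failing a request always posts a CQE, even if the submitter asked for
 * it to be skipped: the skip is transferred to the rest of the link
 * chain via REQ_F_SKIP_LINK_CQES instead.
 */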
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. The same is true for the SQPOLL
	 * offload. The only exception is when we've detached the request and
	 * issue it from an async worker thread; grab the lock in that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
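
/*
 * Illustrative sketch: an opcode handler that touches state protected
 * by uring_lock brackets the access with the pair above, so the same
 * code works for inline issue and from an unlocked io-wq worker alike:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... access ctx->file_table, registered buffers, etc. ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */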

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

/* requires smp_mb() beforehand, see wq_has_sleeper() */
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (waitqueue_active(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	smp_mb();
	__io_cqring_wake(ctx);
}

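/*
 * SQ head and tail are free-running u32 counters, so the unsigned
 * subtraction below yields the number of queued-but-unconsumed entries
 * correctly even across wraparound.
 */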
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

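/* Returns 1 if any task work was run, 0 if there was none pending. */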
static inline int io_run_task_work(void)
{
	if (task_work_pending(current)) {
		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
			clear_notify_signal();
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
		!wq_list_empty(&ctx->work_llist);
}

static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
	int ret = 0;
	int ret2;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		ret = io_run_local_work(ctx);

	/* run this afterwards in case the local work queued more of it */
	ret2 = io_run_task_work();

	/*
	 * Prefer propagating an error over the count of work items run,
	 * but make sure the task work itself still runs either way.
	 */
	if (ret >= 0)
		ret += ret2;

	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
{
	if (llist_empty(&ctx->work_llist))
		return 0;
	return __io_run_local_work(ctx, true);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

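/*
 * Each request pins the submitting task. To avoid per-request atomics,
 * references are taken from (and returned to) the per-task cache in
 * tctx->cached_refs and only refilled in batches when it runs dry.
 */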
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

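/*
 * The caller must have ensured the free list is non-empty, e.g. via a
 * successful io_alloc_req_refill(); there is no empty check here.
 */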
static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}

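/*
 * For IORING_SETUP_DEFER_TASKRUN rings, task work may only be run by
 * the submitter task (ctx->submitter_task); rings without the flag
 * place no such restriction.
 */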
static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

#endif