#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	/* don't use deferred task_work */
	IOU_F_TWQ_FORCE_NORMAL			= 1,

	/*
	 * A hint to not wake right away but delay until enough task_work
	 * items are queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE			= 2,
};
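
/*
 * Illustrative sketch (not part of this header): a completion that posts
 * exactly one CQE per request may queue its task_work with the lazy-wake
 * hint; anything multishot must use the default flags instead:
 *
 *	req->io_task_work.func = io_req_task_complete;
 *	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 *
 * With IORING_SETUP_DEFER_TASKRUN, the waiting task is then only woken
 * once enough completions have been queued to satisfy its wait.
 */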

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that the multishot request should be removed
	 * and that the result has been set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
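
/*
 * Illustrative sketch (hypothetical handler, not part of this header):
 * an opcode's issue handler maps its outcome onto these codes. IOU_OK
 * asks the core to post the CQE from req->cqe; IOU_ISSUE_SKIP_COMPLETE
 * means completion has been arranged elsewhere. do_foo() is made up:
 *
 *	ret = do_foo(req);
 *	if (ret == -EIOCBQUEUED)
 *		return IOU_ISSUE_SKIP_COMPLETE;
 *	io_req_set_res(req, ret, 0);
 *	return IOU_OK;
 */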

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
		bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_lockdep_assert_cq_locked(ctx)				\
	do {								\
		lockdep_assert(in_task());				\
									\
		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
			lockdep_assert_held(&ctx->uring_lock);		\
		} else if (!ctx->task_complete) {			\
			lockdep_assert_held(&ctx->completion_lock);	\
		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
			lockdep_assert(current_work());			\
		} else {						\
			lockdep_assert(current == ctx->submitter_task);	\
		}							\
	} while (0)
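
/*
 * Illustrative sketch (not part of this header): paths that post CQEs
 * directly are expected to satisfy the assertion above, e.g. by
 * bracketing the update with ->completion_lock when no single task owns
 * completions:
 *
 *	spin_lock(&ctx->completion_lock);
 *	io_fill_cqe_req(ctx, req);
 *	io_commit_cqring(ctx);
 *	spin_unlock(&ctx->completion_lock);
 *	io_cqring_wake(ctx);
 */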

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

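/*
 * Illustrative sketch (not part of this header): walking a submission
 * chain with io_for_each_link(), e.g. to count the linked requests:
 *
 *	struct io_kiocb *pos;
 *	int nr = 0;
 *
 *	io_for_each_link(pos, head)
 *		nr++;
 */
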
void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
						       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		/* double-sized CQEs consume two cached entries */
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return false;

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
				   struct io_kiocb *req)
{
	if (likely(__io_fill_cqe_req(ctx, req)))
		return true;
	return io_req_cqe_overflow(req);
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock in that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

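/*
 * Illustrative sketch (not part of this header): helpers that may run
 * either inline or from an io-wq worker bracket their ->uring_lock use
 * with the pair above, keyed off the issue_flags they were invoked with:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	...	(touch state protected by ->uring_lock)
 *	io_ring_submit_unlock(ctx, issue_flags);
 */
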
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

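/*
 * Illustrative sketch (not part of this header): the release store above
 * pairs with userspace's acquire load of the CQ tail, so that CQE
 * contents are visible before the new tail value is observed. In
 * liburing-style terms (consume() and the ring names are hypothetical):
 *
 *	tail = io_uring_smp_load_acquire(cq.ktail);
 *	while (head != tail)
 *		consume(&cq.cqes[head++ & mask]);
 */
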
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks; io_should_wake() makes
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency on eventfd or
	 * epoll and should terminate the multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

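/*
 * Illustrative sketch (not part of this header): the smp_load_acquire()
 * above pairs with the release store userspace issues when publishing a
 * new SQ tail, guaranteeing the SQE contents are visible before the
 * updated tail is seen here. In liburing-style terms:
 *
 *	io_uring_smp_store_release(sq.ktail, tail + 1);
 */
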
static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

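/*
 * Illustrative sketch (not part of this header): wait loops typically
 * drain task_work before deciding to sleep, retrying for as long as it
 * ran. done_waiting() is a hypothetical condition:
 *
 *	do {
 *		if (done_waiting(ctx))
 *			break;
 *	} while (io_run_task_work());
 */
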
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

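/*
 * Illustrative sketch (not part of this header): issue paths pick between
 * deferred and immediate posting based on issue_flags:
 *
 *	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
 *		io_req_complete_defer(req);
 *	else
 *		io_req_complete_post(req, issue_flags);
 */
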
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	kasan_unpoison_object_data(req_cachep, req);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

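/*
 * Illustrative sketch (not part of this header): the submission loop
 * grabs requests through this helper and bails if the cache cannot be
 * refilled:
 *
 *	struct io_kiocb *req;
 *
 *	if (unlikely(!io_alloc_req(ctx, &req)))
 *		break;
 */
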
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
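
/*
 * Illustrative sketch (not part of this header): with IORING_SETUP_SQE128
 * every slot spans two struct io_uring_sqe entries, so a ring index is
 * doubled before indexing the SQE array:
 *
 *	if (ctx->flags & IORING_SETUP_SQE128)
 *		head <<= 1;
 *	sqe = &ctx->sq_sqes[head];
 */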
#endif