/* io_uring/io_uring.h (revision 973fc83f) */
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to tell the
	 * poll runner that the multishot request should be removed and
	 * that the result has been set in req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
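
/*
 * Usage sketch (illustrative, not part of the API proper): an opcode
 * handler returns IOU_OK once req->cqe.res holds the result, or
 * IOU_ISSUE_SKIP_COMPLETE when the completion is posted elsewhere (e.g.
 * by poll or io-wq). The handler below is a hypothetical minimal example:
 *
 *	static int io_foo_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		io_req_set_res(req, 0, 0);
 *		return IOU_OK;
 *	}
 */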

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

static inline void io_req_complete_post_tw(struct io_kiocb *req, bool *locked)
{
	unsigned flags = *locked ? 0 : IO_URING_F_UNLOCKED;

	io_req_complete_post(req, flags);
}

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, true);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)
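
/*
 * Example (illustrative only): walking a submission link chain to count
 * its members, where "head" is the first request of the chain:
 *
 *	struct io_kiocb *pos;
 *	int nr = 0;
 *
 *	io_for_each_link(pos, head)
 *		nr++;
 */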

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
						       bool overflow)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}
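
/*
 * Note on the CQE32 accounting above: struct io_uring_cqe is 16 bytes and
 * a big CQE is 32 bytes, so with IORING_SETUP_CQE32 each completion
 * consumes two regular slots of the cqes array. That is why cqe_cached
 * advances twice per completion while cached_cq_tail, the completion count
 * published to userspace, advances once.
 */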

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a CQ entry, userspace overflowed the
	 * submission (by quite a lot); queue the completion on the
	 * ring's overflow list instead.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}
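
/*
 * Illustrative posting sequence (a sketch, not a definition): a CQE filled
 * with __io_fill_cqe_req() only becomes visible to userspace once the tail
 * is published and waiters are woken, roughly:
 *
 *	io_cq_lock(ctx);
 *	__io_fill_cqe_req(ctx, req);
 *	io_cq_unlock_post(ctx);
 *
 * where io_cq_unlock_post() commits the tail, drops the lock and handles
 * the wakeup side.
 */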

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. The same is true for the SQPOLL
	 * offload. The only exception is when the request has been punted to
	 * an async worker thread, so grab the lock in that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
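
/*
 * Typical (hypothetical) usage in an opcode handler that must touch state
 * protected by uring_lock regardless of issue context:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... manipulate ctx state that requires uring_lock ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 *
 * The pair is a no-op lock-wise for inline/SQPOLL submission, where the
 * lock is already held, and takes/drops the mutex only for
 * IO_URING_F_UNLOCKED callers such as io-wq.
 */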

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

/* requires smp_mb() prior, see wq_has_sleeper() */
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger the waitqueue handler on all waiters on our waitqueue.
	 * This won't necessarily wake up all the tasks; io_should_wake()
	 * makes that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know there is a dependency on eventfd or
	 * epoll and should terminate the multishot poll at that point.
	 */
	if (waitqueue_active(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	smp_mb();
	__io_cqring_wake(ctx);
}
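
/*
 * Ordering sketch (informal): the store-release in io_commit_cqring()
 * pairs with the load-acquire userspace performs on cq.tail before reading
 * CQEs, and the smp_mb() in io_cqring_wake() orders the tail update
 * against the waitqueue_active() check in the usual wq_has_sleeper()
 * pattern: a sleeper either observes the new tail or the waker observes
 * the sleeper.
 */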

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
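
/*
 * The tail - head arithmetic above relies on unsigned wraparound, so it
 * stays correct after the 32-bit indices wrap. Worked example: with
 * tail == 0x00000005 and cached head == 0xfffffffe,
 * 0x5 - 0xfffffffe == 7 pending entries, as intended.
 */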

static inline int io_run_task_work(void)
{
	if (task_work_pending(current)) {
		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
			clear_notify_signal();
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
		!wq_list_empty(&ctx->work_llist);
}

static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
	int ret = 0;
	int ret2;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		ret = io_run_local_work(ctx);

	/* run this second, in case the local work queued more task work */
	ret2 = io_run_task_work();

	/*
	 * Prefer propagating an error over the count of items run, but
	 * still make sure the generic task work gets run either way.
	 */
	if (ret >= 0)
		ret += ret2;

	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
{
	bool locked;
	int ret;

	if (llist_empty(&ctx->work_llist))
		return 0;

	locked = true;
	ret = __io_run_local_work(ctx, &locked);
	/* shouldn't happen! */
	if (WARN_ON_ONCE(!locked))
		mutex_lock(&ctx->uring_lock);
	return ret;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}
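
/*
 * Hypothetical task-work handler shape, for illustration: "locked" tracks
 * whether uring_lock is held across a batch of handlers, so io_tw_lock()
 * takes it at most once and the batch runner drops it when the batch ends:
 *
 *	static void io_foo_tw(struct io_kiocb *req, bool *locked)
 *	{
 *		io_tw_lock(req->ctx, locked);
 *		... work that requires uring_lock ...
 *	}
 */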

/*
 * Don't complete immediately but use the deferred completion
 * infrastructure. Protected by ->uring_lock and can only be used either
 * with IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}
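
/*
 * Sketch of the batching scheme above: cached_refs is a per-task stash of
 * already-taken task references. Getting refs for a submission batch is a
 * plain subtraction; only when the stash goes negative does
 * io_task_refs_refill() take real references in bulk. E.g. submitting 8
 * requests with cached_refs == 32 just leaves cached_refs == 24, with no
 * refcount traffic at all.
 */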

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}
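
/*
 * io_alloc_req() assumes the request cache is non-empty; callers are
 * expected to pair it with a refill check, roughly:
 *
 *	if (unlikely(!io_alloc_req_refill(ctx)))
 *		return -EAGAIN;
 *	req = io_alloc_req(ctx);
 *
 * (The error handling here is illustrative; the real submission path has
 * its own policy.)
 */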

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

#endif