#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	/*
	 * A hint to not wake right away but delay until there are enough of
	 * tw's queued to match the number of CQEs the task is waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE		= 1,
};
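
/*
 * Illustrative only (not a call site in this header): an op that posts
 * exactly one CQE can queue its task_work with a lazy wake, e.g.
 *
 *	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 *
 * so that a DEFER_TASKRUN waiter is only woken once enough task_work
 * has been queued to cover the CQEs it is waiting for.
 */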

enum {
	IOU_OK				= 0,
	IOU_ISSUE_SKIP_COMPLETE		= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important, should just be not an otherwise
	 * valid error code, yet less than -MAX_ERRNO and valid internally.
	 */
	IOU_REQUEUE			= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that multishot should be removed and the
	 * result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT		= -ECANCELED,
};
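
/*
 * Sketch of how a poll runner consumes these codes (illustrative; the
 * real handling lives in the poll code):
 *
 *	ret = io_poll_issue(req, ts);
 *	if (ret == IOU_STOP_MULTISHOT) {
 *		// disarm multishot, completion res is in req->cqe.res
 *	} else if (ret == IOU_ISSUE_SKIP_COMPLETE) {
 *		// completion is posted elsewhere, nothing to do here
 *	}
 */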

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
void io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma,
			struct page **pages, int npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

void *io_pages_map(struct page ***out_pages, unsigned short *npages,
		   size_t size);
void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
		    bool put_pages);

#if defined(CONFIG_PROVE_LOCKING)
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been setup with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
}
#else
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
}
#endif

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}
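
/*
 * Typical pattern (see io_req_queue_tw_complete() below): set the
 * task_work callback first, then queue the request:
 *
 *	req->io_task_work.func = io_req_task_complete;
 *	io_req_task_work_add(req);
 */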

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)
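
/*
 * Example (a sketch mirroring how cancelation matching walks a link
 * chain):
 *
 *	struct io_kiocb *pos;
 *
 *	io_for_each_link(pos, head) {
 *		if (pos->flags & REQ_F_INFLIGHT)
 *			return true;
 *	}
 */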

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}
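
/*
 * Minimal sketch of posting a CQE by hand with the helper above (the
 * real paths are io_fill_cqe_req() and io_post_aux_cqe()); assumes the
 * locking checked by io_lockdep_assert_cq_locked() is already held:
 *
 *	struct io_uring_cqe *cqe;
 *
 *	if (io_get_cqe(ctx, &cqe)) {
 *		WRITE_ONCE(cqe->user_data, user_data);
 *		WRITE_ONCE(cqe->res, res);
 *		WRITE_ONCE(cqe->flags, cflags);
 *	}
 */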

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}
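
/*
 * Opcode handlers typically pair this with a completion return code,
 * e.g. (illustrative):
 *
 *	io_req_set_res(req, ret, 0);
 *	return IOU_OK;
 */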

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}
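
/*
 * The release store above pairs with the acquire load userspace does on
 * cq.tail before reading new CQEs, e.g. in liburing terms (illustrative):
 *
 *	tail = io_uring_smp_load_acquire(cq->ktail);
 */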

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	/*
	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
	 * is race prone if the SQPOLL thread has grabbed entries but not yet
	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
	 * just read the actual sqring head unconditionally.
	 */
	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}
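
/*
 * Waiters typically loop on this, re-checking their wait condition for
 * as long as task_work was actually run (sketch; wait_condition() is a
 * stand-in, not a real helper):
 *
 *	do {
 *		if (wait_condition(ctx))
 *			break;
 *	} while (io_run_task_work());
 */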

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
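
/*
 * Requests queued this way are reaped in a batch by
 * __io_submit_flush_completions(), which amortizes CQE posting and
 * locking across many completions.
 */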

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
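
/*
 * Illustrative SQ slot math based on this stride (the submit path has
 * its own equivalent): with SQE128 each slot is 128 bytes, so the SQE
 * at a given head index lives at
 *
 *	sqe = (void *)ctx->sq_sqes + head * uring_sqe_size(ctx);
 */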

static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}
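
/*
 * Multishot ops rely on this, e.g. (illustrative) bailing out early
 * when the target file can't drive poll-based retries:
 *
 *	if (!io_file_can_poll(req))
 *		return -EBADFD;
 */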
#endif