#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

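/*
 * Return values for opcode issue handlers: IOU_OK means the request is done
 * and the caller posts its completion, IOU_ISSUE_SKIP_COMPLETE means the
 * completion is (or will be) posted elsewhere, so the caller must skip it.
 */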
enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
};

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);

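/*
 * Return a pointer to the next free CQE slot. The fast path consumes from
 * the cached range (cqe_cached up to cqe_sentinel); otherwise fall back to
 * __io_get_cqe(), which may return NULL if the CQ ring is full. With
 * IORING_SETUP_CQE32 each completion consumes two slots of the cache.
 */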
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

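/*
 * Copy @req's completion into the next free CQE, tracing it first. If no
 * CQE is available the completion is handed to the overflow machinery via
 * io_req_cqe_overflow(). For IORING_SETUP_CQE32 rings the two extra fields
 * are written as well, zeroed unless REQ_F_CQE32_INIT is set.
 */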
static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);
	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

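/*
 * Mark a request as failed. A failed request always posts a CQE, so if CQE
 * skipping was requested, drop REQ_F_CQE_SKIP here and set
 * REQ_F_SKIP_LINK_CQES so the rest of the link is skipped instead.
 */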
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

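/*
 * Lock/unlock helpers for code that may run either with uring_lock already
 * held (inline or SQPOLL submission) or from an unlocked context such as
 * io-wq; IO_URING_F_UNLOCKED in @issue_flags selects which case applies.
 */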
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

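/* True if the SQ ring already holds sq_entries unconsumed entries. */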
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

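/*
 * Run pending task_work for the current task, if any. Returns true if a
 * signal notification or task_work was pending, false otherwise.
 */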
static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

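/*
 * task_work handlers track uring_lock ownership via *locked; acquire the
 * lock here if the caller does not hold it yet and remember that we did.
 */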
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

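/*
 * Queue a request for deferred, batched completion, and note whether any
 * queued entry will actually need a CQE flushed.
 */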
static inline void io_req_add_compl_list(struct io_kiocb *req)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	if (!(req->flags & REQ_F_CQE_SKIP))
		state->flush_cqes = true;
	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

int io_run_task_work_sig(void);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_task_prio_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

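/*
 * Iterate over a request and every request linked behind it, e.g.:
 *
 *	io_for_each_link(req, head)
 *		io_req_set_res(req, -ECANCELED, 0);
 */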
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

#endif