/* xref: /openbmc/linux/io_uring/io_uring.h (revision 329061d3) */
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include "io_uring_types.h"

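/*
 * Return codes for opcode issue handlers: IOU_OK means the result has been
 * set with io_req_set_res() and the core posts the completion;
 * IOU_ISSUE_SKIP_COMPLETE means the handler owns completion itself (e.g.
 * the request was queued for async or poll-driven execution), so the core
 * must not post a CQE for it.
 */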
enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
};

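/*
 * Mark a request as failed. A failed request must post its CQE even if CQE
 * skipping was requested, so transfer the skip to the requests linked
 * behind it instead.
 */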
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

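/* Drop the request's file reference; a NULL file is tolerated. */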
static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

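/*
 * Counterpart of io_ring_submit_lock(): drop the uring_lock again only if
 * this issue path had to take it (IO_URING_F_UNLOCKED).
 */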
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. The same is true for the SQPOLL
	 * offload. The only exception is when the request has been detached
	 * and is issued from an async worker thread; grab the lock for that
	 * case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
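
/*
 * Usage sketch (hypothetical opcode handler, not part of this file): take
 * and release the lock the same way so the IO_URING_F_UNLOCKED bookkeeping
 * stays balanced around uring_lock-protected state:
 *
 *	static int io_foo_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		io_ring_submit_lock(req->ctx, issue_flags);
 *		// ... touch uring_lock-protected state ...
 *		io_ring_submit_unlock(req->ctx, issue_flags);
 *		io_req_set_res(req, 0, 0);
 *		return IOU_OK;
 *	}
 */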

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

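/* True once userspace has published sq_entries SQEs we haven't consumed. */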
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

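/* Number of SQEs userspace has published that we have yet to consume. */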
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

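/*
 * Run any task_work pending for the current task, clearing the notify
 * signal first so it can be rearmed. Returns true if there was anything
 * to run.
 */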
static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
		     u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
unsigned int io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

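/*
 * True if this request still needs a provided buffer picked for it: buffer
 * selection was requested, but none has been chosen yet.
 */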
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

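/*
 * Return a selected provided buffer to its pool when the request didn't
 * end up consuming it, so it can be handed out again.
 */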
void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;
	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
	    (req->flags & REQ_F_PARTIAL_IO))
		return;

	/*
	 * READV uses fields in struct io_rw (len/addr) to stash the selected
	 * buffer data. However, if that buffer is recycled, the original
	 * request data stored in addr is lost. Therefore forbid recycling
	 * for now.
	 */
	if (req->opcode == IORING_OP_READV)
		return;

	__io_kbuf_recycle(req, issue_flags);
}

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);
int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
			struct file *file, unsigned int file_slot);
int io_install_fixed_file(struct io_kiocb *req, struct file *file,
			  unsigned int issue_flags, u32 slot_index);

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc);
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

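/*
 * Walk a request and every request linked behind it. For example
 * (hypothetical caller, not part of this file):
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, req)
 *		req_set_fail(cur);
 */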
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

#endif