/* xref: /openbmc/linux/io_uring/io_uring.h (revision f9ead18c) */
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include "io_uring_types.h"

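/*
 * Issue-side return codes for opcode handlers: IOU_OK means the request
 * completed inline and the core should post its CQE now, while
 * IOU_ISSUE_SKIP_COMPLETE (an alias for -EIOCBQUEUED) means completion
 * will be driven later, e.g. by poll or async work, so posting must be
 * skipped at issue time.
 */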
enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
};

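/*
 * Mark a request as failed. A failed request always posts its CQE, so if
 * the submitter asked for it to be skipped (REQ_F_CQE_SKIP), the skip is
 * transferred to the link flags instead and applied to the requests that
 * get cancelled further down the link chain.
 */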
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

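/* Stash the result and flags to be copied into the CQE when it is posted. */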
static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

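/*
 * Conditional ctx locking: IO_URING_F_UNLOCKED is set when the request is
 * issued from an io-wq worker, which doesn't already hold ctx->uring_lock.
 * A typical opcode-handler pattern (sketch):
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... touch state protected by ctx->uring_lock ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */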
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. The same is true for the SQPOLL
	 * offload. The only exception is when we've detached the request
	 * and issue it from an async worker thread; grab the lock for that
	 * case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

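/*
 * The release store to cq.tail pairs with the acquire load userspace does
 * on the tail, so all CQE stores are visible before the new tail is.
 */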
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

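/*
 * Out-of-line completion and provided-buffer helpers. io_fill_cqe_aux()
 * posts a CQE that isn't tied to a request, and io_cqring_ev_posted()
 * wakes waiters and signals eventfd after CQEs have been committed.
 */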
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);

bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
		     u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
unsigned int io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

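/*
 * True if a provided buffer still needs to be selected for this request:
 * buffer selection was requested, but neither a classic provided buffer
 * (REQ_F_BUFFER_SELECTED) nor a ring-provided one (REQ_F_BUFFER_RING) has
 * been picked yet.
 */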
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

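/*
 * File lookup and fixed-file helpers; "fixed" files are registered with
 * the ring and referenced by slot index instead of by fd.
 */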
struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);
int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
			struct file *file, unsigned int file_slot);
int io_install_fixed_file(struct io_kiocb *req, struct file *file,
			  unsigned int issue_flags, u32 slot_index);

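/*
 * Registered-resource (rsrc) node handling: removals are queued on the
 * active rsrc node, and io_rsrc_node_switch() retires that node so the
 * queued resources are put once in-flight requests referencing it finish.
 */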
int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc);
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill);

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);

#endif