// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;
};
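
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * union above is discriminated by ->buf_nr_pages, so a caller could tell
 * the two flavours apart along these lines:
 *
 *	static inline bool io_buffer_list_is_ring(struct io_buffer_list *bl)
 *	{
 *		return bl->buf_nr_pages != 0;
 *	}
 *
 * A non-zero ->buf_nr_pages means buf_pages/buf_ring are live; zero means
 * ->buf_list holds classic provided buffers.
 */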

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
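
/*
 * Userspace counterpart of the classic provided-buffer path, sketched with
 * liburing (assumed available; error handling elided): queue an
 * IORING_OP_PROVIDE_BUFFERS request handing 'nr' buffers of 'len' bytes
 * each to group 'bgid', with buffer IDs starting at 'bid':
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, bufs, len, nr, bgid, bid);
 *	io_uring_submit(&ring);
 */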

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
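
/*
 * Matching userspace registration, sketched with liburing (assumed;
 * 'ring_mem' is a hypothetical page-aligned, mmap'd region sized for the
 * entries). ring_entries must be a power of two, which is what makes the
 * ->mask based indexing of ring provided buffers work:
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring_mem,
 *		.ring_entries	= 8,
 *		.bgid		= 0,
 *	};
 *
 *	io_uring_register_buf_ring(&ring, &reg, 0);
 */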

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING: we can just clear
	 * the flag and thereby ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
		}
	}
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
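
/*
 * Typical caller pattern (sketch mirroring how opcode handlers such as
 * recv consume these helpers; per-opcode details vary):
 *
 *	if (io_do_buffer_select(req)) {
 *		buf = io_buffer_select(req, &len, issue_flags);
 *		if (!buf)
 *			return -ENOBUFS;
 *	}
 */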

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	/*
	 * READV uses fields in `struct io_rw` (len/addr) to stash the
	 * selected buffer data. However, if that buffer is recycled, the
	 * original request data stored in addr is lost. Therefore forbid
	 * recycling for now.
	 */
	if (req->opcode == IORING_OP_READV) {
		if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
			req->buf_list->head++;
			req->buf_list = NULL;
		}
		return;
	}
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			req->buf_index = req->buf_list->bgid;
			req->buf_list->head++;
		}
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return ret;
}
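
/*
 * The value returned above ends up in cqe->flags. Using only the uapi
 * encoding, a userspace consumer can recover the selected buffer ID from
 * a completion like so (sketch):
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *
 * Buffer 'bid' in the request's buffer group is then owned by the
 * application again.
 */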

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif