// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	atomic_t refs;

	/* ring mapped provided buffers */
	__u8 is_mapped;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
};
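
/*
 * Purely illustrative helper (hypothetical, not part of the kernel tree):
 * it makes the union discriminant described in the struct comment above
 * explicit. A non-zero ->buf_nr_pages means the ring-mapped members
 * (buf_pages/buf_ring) are live; zero means the classic ->buf_list is in
 * use.
 */
static inline bool io_bl_is_ring_mapped_example(const struct io_buffer_list *bl)
{
	return bl->buf_nr_pages != 0;
}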

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
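
/*
 * For orientation (a liburing-side sketch; assumes the application uses
 * liburing): each classic provided buffer originates in userspace via,
 * for example,
 *
 *	io_uring_prep_provide_buffers(sqe, addr, len, nr, bgid, bid);
 *
 * and ends up as one struct io_buffer queued on the group's buf_list,
 * with ->addr/->len/->bid/->bgid mirroring those arguments.
 */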

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
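
/*
 * Userspace-side sketch (hedged; exact flags and constraints depend on
 * the kernel version): io_register_pbuf_ring() is reached through the
 * IORING_REGISTER_PBUF_RING opcode with a struct io_uring_buf_reg
 * describing the ring, roughly:
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring_mem,
 *		.ring_entries	= 8,	// must be a power of two
 *		.bgid		= 0,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 *
 * io_uring_register() here is the raw syscall wrapper; liburing wraps
 * the same operation as io_uring_register_buf_ring().
 */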

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING; we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial IO, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
		}
	}
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			req->buf_index = req->buf_list->bgid;
			req->buf_list->head++;
		}
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return ret;
}
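
/*
 * Consumer-side sketch (userspace; variable names are assumptions): the
 * value returned above lands in cqe->flags, so the completing
 * application recovers the buffer ID like
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		buf_id = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *
 * Note that ret is computed before req->buf_index is overwritten with
 * the group ID, so the CQE carries the buffer ID, not the bgid.
 */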

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif