// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	atomic_t refs;

	/* ring mapped provided buffers */
	__u8 is_mapped;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
};
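
/*
 * How the ring provided buffer fields above fit together, as a rough
 * sketch rather than the in-tree selection code (see kbuf.c): nr_entries
 * is a power of two, so mask is nr_entries - 1, and the next candidate
 * buffer lives at head & mask. Committing a buffer advances head.
 *
 *	static struct io_uring_buf *example_peek_buf(struct io_buffer_list *bl)
 *	{
 *		struct io_uring_buf_ring *br = bl->buf_ring;
 *
 *		return &br->bufs[bl->head & bl->mask];
 *	}
 */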

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
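
/*
 * Each classic provided buffer originates from an IORING_OP_PROVIDE_BUFFERS
 * request and is tracked by one io_buffer above. A hedged userspace sketch
 * using liburing (sizes and ids are illustrative):
 *
 *	// 8 buffers of 4096 bytes each, group id 0, buffer ids 0..7
 *	io_uring_prep_provide_buffers(sqe, mem, 4096, 8, 0, 0);
 */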

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
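
/*
 * These two back the IORING_REGISTER_PBUF_RING and IORING_UNREGISTER_PBUF_RING
 * opcodes of the io_uring_register(2) system call. A hedged sketch of the
 * registration argument the application fills in (values illustrative):
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (__u64)(unsigned long) ring_mem,
 *		.ring_entries	= 8,	// must be a power of two
 *		.bgid		= 0,	// buffer group id
 *	};
 */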

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING; we can just clear
	 * the flag and thereby ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io; in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
		}
	}
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}
107  
__io_put_kbuf_list(struct io_kiocb * req,struct list_head * list)108  static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
109  					      struct list_head *list)
110  {
111  	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
112  
113  	if (req->flags & REQ_F_BUFFER_RING) {
114  		if (req->buf_list) {
115  			req->buf_index = req->buf_list->bgid;
116  			req->buf_list->head++;
117  		}
118  		req->flags &= ~REQ_F_BUFFER_RING;
119  	} else {
120  		req->buf_index = req->kbuf->bgid;
121  		list_add(&req->kbuf->list, list);
122  		req->flags &= ~REQ_F_BUFFER_SELECTED;
123  	}
124  
125  	return ret;
126  }
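
/*
 * The value returned above is what the application sees in cqe->flags.
 * A sketch of the matching userspace decode:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// bid identifies the consumed buffer within its group
 *	}
 */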

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif