// SPDX-License-Identifier: GPL-2.0
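/*
 * Provided ("selected") buffer support: classic buffer groups filled via the
 * provide-buffers opcode, and ring-mapped buffer groups registered through
 * io_register_pbuf_ring() below.
 */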
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

#define BGID_ARRAY	64

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

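/*
 * Buffer group IDs below BGID_ARRAY live in a flat, lazily allocated array
 * for cheap lookup; higher group IDs are tracked in the io_bl_xa xarray.
 * The two helpers below hide that split from the rest of the file.
 */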
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	if (ctx->io_bl && bgid < BGID_ARRAY)
		return &ctx->io_bl[bgid];

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	bl->bgid = bgid;
	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

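/*
 * Return a classic (non ring-mapped) provided buffer to its group so it can
 * be selected again, e.g. when a request will be retried. Buffers that have
 * already seen partial IO are not recycled, per the comment below.
 */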
void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return;
}

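/*
 * Release the buffer a request had selected and return the CQE flags that
 * report the consumed buffer ID back to userspace. Which list (if any) the
 * buffer is returned to depends on the locking context, see below.
 */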
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}

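/*
 * Grab the first free buffer from a classic provided-buffer group. Note that
 * req->buf_index holds the group ID on entry and the selected buffer ID on
 * return.
 */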
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

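/*
 * Pick the next buffer from a ring-mapped buffer group. Userspace is expected
 * to publish new entries by filling br->bufs[] and advancing br->tail with a
 * release store; the acquire load of the tail below pairs with that. The
 * kernel consumes entries by bumping bl->head, which is not shared with
 * userspace in this scheme.
 */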
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	if (head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes. Coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}

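/*
 * Main entry point for buffer selection: look up the group given by
 * req->buf_index and defer to the ring-mapped or classic path depending on
 * how that group was set up.
 */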
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->buf_nr_pages)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

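/* Lazily allocate the flat array used for group IDs below BGID_ARRAY. */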
static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
{
	int i;

	ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
				GFP_KERNEL);
	if (!ctx->io_bl)
		return -ENOMEM;

	for (i = 0; i < BGID_ARRAY; i++) {
		INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
		ctx->io_bl[i].bgid = i;
	}

	return 0;
}

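/*
 * Tear down up to @nbufs buffers from a group. For a ring-mapped group the
 * user pages are unpinned and the whole ring goes away; classic buffers are
 * simply unlinked here, their backing pages are freed from
 * io_destroy_buffers() via ctx->io_buffers_pages.
 */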
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->buf_nr_pages) {
		int j;

		i = bl->buf_ring->tail - bl->head;
		for (j = 0; j < bl->buf_nr_pages; j++)
			unpin_user_page(bl->buf_pages[j]);
		kvfree(bl->buf_pages);
		bl->buf_pages = NULL;
		bl->buf_nr_pages = 0;
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		return i;
	}

	/* the head kbuf is the list itself */
	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&nxt->list);
		if (++i == nbufs)
			return i;
		cond_resched();
	}
	i++;

	return i;
}

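/* Ring teardown: drop every buffer group and the bulk-allocated buffer pages. */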
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;
	int i;

	for (i = 0; i < BGID_ARRAY; i++) {
		if (!ctx->io_bl)
			break;
		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
	}

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		__io_remove_buffers(ctx, bl, -1U);
		kfree(bl);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}

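/*
 * Prep for removing buffers: the SQE carries the number of buffers to remove
 * in ->fd and the buffer group ID in ->buf_group; all other fields must be
 * zero.
 */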
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

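/*
 * Remove up to p->nbufs classic buffers from the given group. Mapped buffer
 * rings are rejected here and must be torn down via unregister instead.
 */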
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->buf_nr_pages)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	if (ret < 0)
		req_set_fail(req);

	/* complete before unlock, IOPOLL may need the lock */
	io_req_set_res(req, ret, 0);
	__io_req_complete(req, issue_flags);
	io_ring_submit_unlock(ctx, issue_flags);
	return IOU_ISSUE_SKIP_COMPLETE;
}

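/*
 * Prep for providing buffers. The SQE describes a contiguous user region that
 * is carved into equally sized buffers; roughly, a submission would be set up
 * like this (illustrative sketch only, names are the SQE fields read below):
 *
 *	sqe->opcode	= IORING_OP_PROVIDE_BUFFERS;
 *	sqe->addr	= (unsigned long) base;	// nbufs * buf_len bytes
 *	sqe->len	= buf_len;		// size of each buffer
 *	sqe->fd		= nbufs;		// number of buffers
 *	sqe->buf_group	= bgid;			// buffer group ID
 *	sqe->off	= first_bid;		// ID of the first buffer
 */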
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

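/*
 * Make sure ctx->io_buffers_cache is not empty: first try to reclaim buffers
 * that completed out of line onto io_buffers_comp, otherwise carve a fresh
 * page into io_buffer entries. The page is tracked on ctx->io_buffers_pages
 * so it can be freed at ring teardown.
 */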
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}

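/*
 * Turn a provide-buffers request into individual io_buffer entries on the
 * group list. Buffer lengths are clamped to MAX_RW_COUNT and buffer IDs are
 * assigned sequentially starting at pbuf->bid.
 */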
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

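/*
 * Issue side of provide-buffers: find (or create) the target group and add
 * the new buffers, failing if the group is backed by a mapped buffer ring.
 */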
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			goto err;
	}

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->buf_nr_pages) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	if (ret < 0)
		req_set_fail(req);
	/* complete before unlock, IOPOLL may need the lock */
	io_req_set_res(req, ret, 0);
	__io_req_complete(req, issue_flags);
	io_ring_submit_unlock(ctx, issue_flags);
	return IOU_ISSUE_SKIP_COMPLETE;
}

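/*
 * Register a ring-mapped provided buffer group. The ring memory is allocated
 * by userspace and pinned here; a rough sketch of the registration argument
 * (illustrative only, the exact call is typically wrapped by liburing):
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring,	// page aligned
 *		.ring_entries	= 128,			// power of 2, < 65536
 *		.bgid		= bgid,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 */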
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_ring *br;
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	struct page **pages;
	int nr_pages;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (!reg.ring_addr)
		return -EFAULT;
	if (reg.ring_addr & ~PAGE_MASK)
		return -EINVAL;
	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
		int ret = io_init_bl_list(ctx);
		if (ret)
			return ret;
	}

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	pages = io_pin_pages(reg.ring_addr,
			     struct_size(br, bufs, reg.ring_entries),
			     &nr_pages);
	if (IS_ERR(pages)) {
		kfree(free_bl);
		return PTR_ERR(pages);
	}

	br = page_address(pages[0]);
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->nr_entries = reg.ring_entries;
	bl->buf_ring = br;
	bl->mask = reg.ring_entries - 1;
	io_buffer_add_list(ctx, bl, reg.bgid);
	return 0;
}

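/*
 * Unregister a ring-mapped buffer group: unpin its pages and, if the group
 * lives in the xarray, drop the io_buffer_list itself.
 */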
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->buf_nr_pages)
		return -EINVAL;

	__io_remove_buffers(ctx, bl, -1U);
	if (bl->bgid >= BGID_ARRAY) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		kfree(bl);
	}
	return 0;
}