// SPDX-License-Identifier: GPL-2.0
/* openbmc/linux: io_uring/kbuf.c (revision cb051977) */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

#define BGID_ARRAY	64

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)
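
/*
 * On completion, the selected BID travels back to userspace in the upper
 * 16 bits of cqe->flags, with IORING_CQE_F_BUFFER set; e.g. the consumer
 * recovers it as cqe->flags >> IORING_CQE_BUFFER_SHIFT.
 */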

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

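/*
 * Buffer group lookup is two-tier: the BGID_ARRAY lowest-numbered groups
 * live in a flat array for cheap indexing, while higher group IDs are
 * stored in (and looked up from) the ctx->io_bl_xa xarray.
 */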
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	if (ctx->io_bl && bgid < BGID_ARRAY)
		return &ctx->io_bl[bgid];

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	bl->bgid = bgid;
	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
}

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}

static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	/* mmap'ed buffers are always virtually contiguous */
	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
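		/*
		 * Pinned rings that span multiple pages are not virtually
		 * contiguous, so resolve head into a (page, entry) pair.
		 * For example, with 4K pages and 16-byte ring entries
		 * (256 per page), head == 300 lands at entry 44 of page 1.
		 */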
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes: coming in unlocked means we're being called
		 * from io-wq context and there may be further retries in async
		 * hybrid mode. For the locked case, the caller must call commit
		 * when the transfer completes (or if we get -EAGAIN and must
		 * poll or retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}
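
/*
 * Userspace refills the ring by filling io_uring_buf entries and then
 * publishing them with a release store to ->tail, pairing with the
 * acquire load of ->tail above. A minimal sketch without liburing (all
 * names local to the example):
 *
 *	br->bufs[tail & mask].addr = (__u64)(unsigned long)data;
 *	br->bufs[tail & mask].len = data_len;
 *	br->bufs[tail & mask].bid = bid;
 *	__atomic_store_n(&br->tail, tail + 1, __ATOMIC_RELEASE);
 *
 * liburing wraps this as io_uring_buf_ring_add() followed by
 * io_uring_buf_ring_advance().
 */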

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_mapped)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
{
	int i;

	ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
				GFP_KERNEL);
	if (!ctx->io_bl)
		return -ENOMEM;

	for (i = 0; i < BGID_ARRAY; i++) {
		INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
		ctx->io_bl[i].bgid = i;
	}

	return 0;
}

static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_mapped) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->is_mmap) {
			folio_put(virt_to_folio(bl->buf_ring));
			bl->buf_ring = NULL;
			bl->is_mmap = 0;
		} else if (bl->buf_nr_pages) {
			int j;

			for (j = 0; j < bl->buf_nr_pages; j++)
				unpin_user_page(bl->buf_pages[j]);
			kvfree(bl->buf_pages);
			bl->buf_pages = NULL;
			bl->buf_nr_pages = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_mapped = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;
	int i;

	for (i = 0; i < BGID_ARRAY; i++) {
		if (!ctx->io_bl)
			break;
		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
	}

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		__io_remove_buffers(ctx, bl, -1U);
		kfree(bl);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}
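
/*
 * For reference, the IORING_OP_REMOVE_BUFFERS SQE carries nbufs in
 * sqe->fd and the group ID in sqe->buf_group; with liburing this is
 * io_uring_prep_remove_buffers(sqe, nr, bgid).
 */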

int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_mapped)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}
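
/*
 * The matching submission side: IORING_OP_PROVIDE_BUFFERS packs nbufs
 * into sqe->fd, the base address into sqe->addr, the per-buffer length
 * into sqe->len, and the starting BID into sqe->off. With liburing:
 * io_uring_prep_provide_buffers(sqe, addr, len, nr, bgid, bid).
 */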

static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (e.g. not under uring_lock)
	 * will add to ->io_buffers_comp. If we don't have any free buffers,
	 * check the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
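	/* e.g. 128 entries per page, assuming 4K pages and a 32-byte io_buffer */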
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}

static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			goto err;
	}

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_mapped) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br;
	struct page **pages;
	int i, nr_pages;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Apparently some 32-bit boxes (ARM) will return highmem pages,
	 * which then need to be mapped. We could support that, but it'd
	 * complicate the code and slow down the common cases quite a bit.
	 * So just error out, returning -EINVAL just like we did on kernels
	 * that didn't support mapped buffer rings.
	 */
	for (i = 0; i < nr_pages; i++)
		if (PageHighMem(pages[i]))
			goto error_unpin;

	br = page_address(pages[0]);
#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmaps the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1))
		goto error_unpin;
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_mapped = 1;
	bl->is_mmap = 0;
	return 0;
error_unpin:
	for (i = 0; i < nr_pages; i++)
		unpin_user_page(pages[i]);
	kvfree(pages);
	return -EINVAL;
}

static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
	size_t ring_size;
	void *ptr;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
	ptr = (void *) __get_free_pages(gfp, get_order(ring_size));
	if (!ptr)
		return -ENOMEM;

	bl->buf_ring = ptr;
	bl->is_mapped = 1;
	bl->is_mmap = 1;
	return 0;
}
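
/*
 * For the IOU_PBUF_RING_MMAP case the application maps this allocation
 * itself, at an offset encoding the group ID; a sketch of the userspace
 * side, using the io_uring uapi constants:
 *
 *	off = IORING_OFF_PBUF_RING | (__u64)bgid << IORING_OFF_PBUF_SHIFT;
 *	br = mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED | MAP_POPULATE, ring_fd, off);
 */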

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= MAX_BIDS_PER_BGID)
		return -EINVAL;

	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			return ret;
	}

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_mapped || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(&reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree(free_bl);
	return ret;
}
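
/*
 * Registration is driven from userspace via
 * io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1), with a
 * struct io_uring_buf_reg describing bgid, ring_entries, and either a
 * pre-allocated ring_addr or IOU_PBUF_RING_MMAP in flags; liburing wraps
 * this in io_uring_register_buf_ring().
 */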

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_mapped)
		return -EINVAL;

	__io_remove_buffers(ctx, bl, -1U);
	if (bl->bgid >= BGID_ARRAY) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		kfree(bl);
	}
	return 0;
}

void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
{
	struct io_buffer_list *bl;

	bl = io_buffer_get_list(ctx, bgid);
	if (!bl || !bl->is_mmap)
		return NULL;

	return bl->buf_ring;
}