xref: /openbmc/linux/io_uring/kbuf.c (revision 49c23519)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

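/* Parsed SQE arguments for the provide/remove buffers opcodes */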
struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

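/*
 * Plain lookup of a buffer group by bgid. io_buffer_get_list() is the
 * variant that also asserts that the uring_lock is held.
 */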
static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
							  unsigned int bgid)
{
	return xa_load(&ctx->io_bl_xa, bgid);
}

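/*
 * Tracks a kernel allocated memory region backing an mmap'ed provided
 * buffer ring, so the region can be reused and later freed at release time.
 */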
struct io_buf_free {
	struct hlist_node		list;
	void				*mem;
	size_t				size;
	int				inuse;
};

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return __io_buffer_get_list(ctx, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	atomic_set(&bl->refs, 1);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

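/*
 * Return a legacy provided buffer to its group list so it can be selected
 * again, unless the request has already transferred data from/to it.
 */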
void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
}

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}

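/*
 * Pick the first buffer off a legacy provided buffer group, cap *len to
 * its size, and return its user address to the caller.
 */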
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

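/*
 * Select the next buffer from a ring mapped provided buffer group. The ring
 * tail is written by userspace, so it's read with an acquire barrier before
 * the entry itself is looked at.
 */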
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	/* mmap'ed buffers are always contiguous */
	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, as coming in unlocked means we're being called
		 * from io-wq context and there may be further retries in async
		 * hybrid mode. For the locked case, the caller must call commit
		 * when the transfer completes (or if we get -EAGAIN and must
		 * poll or retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}

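/*
 * Select a buffer for this request from its buffer group, using either the
 * ring mapped or the legacy provided buffer scheme.
 */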
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_mapped)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/*
 * Mark the given mapped range as free for reuse
 */
static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	struct io_buf_free *ibf;

	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
		if (bl->buf_ring == ibf->mem) {
			ibf->inuse = 0;
			return;
		}
	}

	/* can't happen... */
	WARN_ON_ONCE(1);
}

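/*
 * Remove up to 'nbufs' buffers from the given group. Ring mapped groups are
 * torn down entirely; legacy buffers are moved back to the ctx-wide cache.
 * Returns how many buffers were removed.
 */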
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_mapped) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->is_mmap) {
			/*
			 * io_kbuf_mmap_list_free() will free the page(s) at
			 * ->release() time.
			 */
			io_kbuf_mark_free(ctx, bl);
			bl->buf_ring = NULL;
			bl->is_mmap = 0;
		} else if (bl->buf_nr_pages) {
			int j;

			for (j = 0; j < bl->buf_nr_pages; j++)
				unpin_user_page(bl->buf_pages[j]);
			kvfree(bl->buf_pages);
			bl->buf_pages = NULL;
			bl->buf_nr_pages = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_mapped = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

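/*
 * Drop a reference to a buffer list, releasing its buffers and freeing the
 * list itself (RCU deferred) once the last reference goes away.
 */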
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	if (atomic_dec_and_test(&bl->refs)) {
		__io_remove_buffers(ctx, bl, -1U);
		kfree_rcu(bl, rcu);
	}
}

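/*
 * Free every registered buffer group and release the pages backing the
 * legacy io_buffer cache.
 */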
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		io_put_bl(ctx, bl);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}

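/*
 * Prep for IORING_OP_REMOVE_BUFFERS: validate the SQE and pull out the
 * number of buffers and the buffer group ID.
 */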
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

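/*
 * IORING_OP_REMOVE_BUFFERS: remove up to nbufs legacy provided buffers from
 * the given group. Not allowed for ring mapped groups.
 */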
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_mapped)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

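/*
 * Prep for IORING_OP_PROVIDE_BUFFERS: validate the user address range,
 * buffer count, group ID and starting buffer ID.
 */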
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

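/*
 * Top up the ctx-wide io_buffer cache, either by splicing back buffers that
 * completed out-of-line or by allocating a fresh page worth of entries.
 */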
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}

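/*
 * Carve 'nbufs' buffers out of the user supplied address range and add them
 * to the group, pulling io_buffer entries from the ctx cache.
 */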
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

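/*
 * IORING_OP_PROVIDE_BUFFERS: add buffers to a legacy provided buffer group,
 * creating the group on first use.
 */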
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout.
			 */
			kfree_rcu(bl, rcu);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_mapped) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

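/*
 * Register a provided buffer ring whose memory the application allocated
 * itself: pin the user pages and use them directly for the ring.
 */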
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br;
	struct page **pages;
	int i, nr_pages;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Apparently some 32-bit boxes (ARM) will return highmem pages,
	 * which then need to be mapped. We could support that, but it'd
	 * complicate the code and slow down the common cases quite a bit.
	 * So just error out, returning -EINVAL just like we did on kernels
	 * that didn't support mapped buffer rings.
	 */
	for (i = 0; i < nr_pages; i++)
		if (PageHighMem(pages[i]))
			goto error_unpin;

	br = page_address(pages[0]);
#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmaps the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1))
		goto error_unpin;
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_mapped = 1;
	bl->is_mmap = 0;
	return 0;
error_unpin:
	for (i = 0; i < nr_pages; i++)
		unpin_user_page(pages[i]);
	kvfree(pages);
	return -EINVAL;
}

/*
 * See if we have a suitable region that we can reuse, rather than allocate
 * both a new io_buf_free and mem region again. We leave it on the list as
 * even a reused entry will need freeing at ring release.
 */
static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
						    size_t ring_size)
{
	struct io_buf_free *ibf, *best = NULL;
	size_t best_dist;

	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
		size_t dist;

		if (ibf->inuse || ibf->size < ring_size)
			continue;
		dist = ibf->size - ring_size;
		if (!best || dist < best_dist) {
			best = ibf;
			if (!dist)
				break;
			best_dist = dist;
		}
	}

	return best;
}

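/*
 * For IOU_PBUF_RING_MMAP the kernel allocates the ring memory and the
 * application mmaps it. Reuse a previously allocated region of suitable
 * size if one is free, else allocate a new one and track it for freeing.
 */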
static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
			      struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	struct io_buf_free *ibf;
	size_t ring_size;
	void *ptr;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);

	/* Reuse existing entry, if we can */
	ibf = io_lookup_buf_free_entry(ctx, ring_size);
	if (!ibf) {
		ptr = io_mem_alloc(ring_size);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		/* Allocate and store deferred free entry */
		ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
		if (!ibf) {
			io_mem_free(ptr);
			return -ENOMEM;
		}
		ibf->mem = ptr;
		ibf->size = ring_size;
		hlist_add_head(&ibf->list, &ctx->io_buf_list);
	}
	ibf->inuse = 1;
	bl->buf_ring = ibf->mem;
	bl->is_mapped = 1;
	bl->is_mmap = 1;
	return 0;
}

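/*
 * IORING_REGISTER_PBUF_RING: register a ring mapped provided buffer group,
 * either pinning application memory or allocating kernel memory for mmap.
 */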
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_mapped || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;
}

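/*
 * IORING_UNREGISTER_PBUF_RING: tear down a previously registered ring
 * mapped provided buffer group.
 */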
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_mapped)
		return -EINVAL;

	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
	return 0;
}

struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid)
{
	struct io_buffer_list *bl;
	bool ret;

	/*
	 * We have to be a bit careful here - we're inside mmap and cannot grab
	 * the uring_lock. This means the buffer_list could be simultaneously
	 * going away, if someone is trying to be sneaky. Look it up under rcu
	 * so we know it's not going away, and attempt to grab a reference to
	 * it. If the ref is already zero, then fail the mapping. If successful,
	 * the caller will call io_put_bl() to drop the reference at the end.
	 * This may then safely free the buffer_list (and drop the pages) at
	 * that point, as vm_insert_pages() would've already grabbed the
	 * necessary vma references.
	 */
	rcu_read_lock();
	bl = xa_load(&ctx->io_bl_xa, bgid);
	/* must be a mmap'able buffer ring and have pages */
	ret = false;
	if (bl && bl->is_mmap)
		ret = atomic_inc_not_zero(&bl->refs);
	rcu_read_unlock();

	if (ret)
		return bl;

	return ERR_PTR(-EINVAL);
}

/*
 * Called at or after ->release() to free the mmap'ed buffers that we used
 * for memory mapped provided buffer rings.
 */
void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
{
	struct io_buf_free *ibf;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
		hlist_del(&ibf->list);
		io_mem_free(ibf->mem);
		kfree(ibf);
	}
}