xref: /openbmc/linux/io_uring/kbuf.c (revision 36db6e8484ed455bbb320d89a119378897ae991c)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/namei.h>
9 #include <linux/poll.h>
10 #include <linux/vmalloc.h>
11 #include <linux/io_uring.h>
12 
13 #include <uapi/linux/io_uring.h>
14 
15 #include "io_uring.h"
16 #include "opdef.h"
17 #include "kbuf.h"
18 
19 #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
20 
21 /* BIDs are addressed by a 16-bit field in a CQE */
22 #define MAX_BIDS_PER_BGID (1 << 16)
23 
24 struct io_provide_buf {
25 	struct file			*file;
26 	__u64				addr;
27 	__u32				len;
28 	__u32				bgid;
29 	__u32				nbufs;
30 	__u16				bid;
31 };
32 
33 static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
34 							unsigned int bgid)
35 {
36 	lockdep_assert_held(&ctx->uring_lock);
37 
38 	return xa_load(&ctx->io_bl_xa, bgid);
39 }
40 
41 static int io_buffer_add_list(struct io_ring_ctx *ctx,
42 			      struct io_buffer_list *bl, unsigned int bgid)
43 {
44 	/*
45 	 * Store buffer group ID and finally mark the list as visible.
46 	 * The normal lookup doesn't care about the visibility as we're
47 	 * always under the ->uring_lock, but the RCU lookup from mmap does.
48 	 */
49 	bl->bgid = bgid;
50 	atomic_set(&bl->refs, 1);
51 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
52 }
53 
54 void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
55 {
56 	struct io_ring_ctx *ctx = req->ctx;
57 	struct io_buffer_list *bl;
58 	struct io_buffer *buf;
59 
60 	/*
61 	 * For legacy provided buffer mode, don't recycle if we already did
62 	 * IO to this buffer. For ring-mapped provided buffer mode, we should
63 	 * increment ring->head to explicitly monopolize the buffer to avoid
64 	 * multiple use.
65 	 */
66 	if (req->flags & REQ_F_PARTIAL_IO)
67 		return;
68 
69 	io_ring_submit_lock(ctx, issue_flags);
70 
71 	buf = req->kbuf;
72 	bl = io_buffer_get_list(ctx, buf->bgid);
73 	list_add(&buf->list, &bl->buf_list);
74 	req->flags &= ~REQ_F_BUFFER_SELECTED;
75 	req->buf_index = buf->bgid;
76 
77 	io_ring_submit_unlock(ctx, issue_flags);
78 	return;
79 }
80 
81 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
82 {
83 	unsigned int cflags;
84 
85 	/*
86 	 * We can add this buffer back to two lists:
87 	 *
88 	 * 1) The io_buffers_cache list. This one is protected by the
89 	 *    ctx->uring_lock. If we already hold this lock, add back to this
90 	 *    list as we can grab it from issue as well.
91 	 * 2) The io_buffers_comp list. This one is protected by the
92 	 *    ctx->completion_lock.
93 	 *
94 	 * We migrate buffers from the comp_list to the issue cache list
95 	 * when we need one.
96 	 */
97 	if (req->flags & REQ_F_BUFFER_RING) {
98 		/* no buffers to recycle for this case */
99 		cflags = __io_put_kbuf_list(req, NULL);
100 	} else if (issue_flags & IO_URING_F_UNLOCKED) {
101 		struct io_ring_ctx *ctx = req->ctx;
102 
103 		spin_lock(&ctx->completion_lock);
104 		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
105 		spin_unlock(&ctx->completion_lock);
106 	} else {
107 		lockdep_assert_held(&req->ctx->uring_lock);
108 
109 		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
110 	}
111 	return cflags;
112 }
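
/*
 * Userspace consumes the cflags produced above from cqe->flags: if
 * IORING_CQE_F_BUFFER is set, the upper 16 bits carry the buffer ID the
 * kernel picked. A minimal sketch of decoding it (handle_cqe is a
 * hypothetical helper, not part of this file):
 *
 *	#include <stdio.h>
 *	#include <liburing.h>
 *
 *	static void handle_cqe(struct io_uring_cqe *cqe)
 *	{
 *		if (cqe->flags & IORING_CQE_F_BUFFER) {
 *			// buffer ID lives in the upper 16 bits of cqe->flags
 *			unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *
 *			printf("res=%d used bid=%u\n", cqe->res, bid);
 *		}
 *	}
 */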
113 
114 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
115 					      struct io_buffer_list *bl)
116 {
117 	if (!list_empty(&bl->buf_list)) {
118 		struct io_buffer *kbuf;
119 
120 		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
121 		list_del(&kbuf->list);
122 		if (*len == 0 || *len > kbuf->len)
123 			*len = kbuf->len;
124 		req->flags |= REQ_F_BUFFER_SELECTED;
125 		req->kbuf = kbuf;
126 		req->buf_index = kbuf->bid;
127 		return u64_to_user_ptr(kbuf->addr);
128 	}
129 	return NULL;
130 }
131 
132 static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
133 					  struct io_buffer_list *bl,
134 					  unsigned int issue_flags)
135 {
136 	struct io_uring_buf_ring *br = bl->buf_ring;
137 	struct io_uring_buf *buf;
138 	__u16 head = bl->head;
139 
140 	if (unlikely(smp_load_acquire(&br->tail) == head))
141 		return NULL;
142 
143 	head &= bl->mask;
144 	buf = &br->bufs[head];
145 	if (*len == 0 || *len > buf->len)
146 		*len = buf->len;
147 	req->flags |= REQ_F_BUFFER_RING;
148 	req->buf_list = bl;
149 	req->buf_index = buf->bid;
150 
151 	if (issue_flags & IO_URING_F_UNLOCKED ||
152 	    (req->file && !file_can_poll(req->file))) {
153 		/*
154 		 * If we came in unlocked, we have no choice but to consume the
155 		 * buffer here, otherwise nothing ensures that the buffer won't
156 		 * get used by others. This does mean it'll be pinned until the
157 	 * IO completes; coming in unlocked means we're being called from
158 		 * io-wq context and there may be further retries in async hybrid
159 		 * mode. For the locked case, the caller must call commit when
160 	 * the transfer completes (or if we get -EAGAIN and must poll or
161 		 * retry).
162 		 */
163 		req->buf_list = NULL;
164 		bl->head++;
165 	}
166 	return u64_to_user_ptr(buf->addr);
167 }
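
/*
 * The ring consumed here is filled by userspace. A minimal sketch of the
 * producer side, using the uapi structs directly; "br", "ring_entries"
 * (the registered power-of-two size), "bid", "addr" and "len" are assumed
 * to come from the application. The release store pairs with the
 * smp_load_acquire() on ->tail above; liburing wraps this pattern as
 * io_uring_buf_ring_add() + io_uring_buf_ring_advance().
 *
 *	#include <linux/io_uring.h>
 *
 *	static void provide_one(struct io_uring_buf_ring *br,
 *				unsigned int ring_entries, unsigned short bid,
 *				void *addr, unsigned int len)
 *	{
 *		unsigned short tail = br->tail;
 *		struct io_uring_buf *buf = &br->bufs[tail & (ring_entries - 1)];
 *
 *		buf->addr = (unsigned long) addr;
 *		buf->len = len;
 *		buf->bid = bid;
 *		// publish the entry before making it visible via the tail
 *		__atomic_store_n(&br->tail, tail + 1, __ATOMIC_RELEASE);
 *	}
 */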
168 
169 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
170 			      unsigned int issue_flags)
171 {
172 	struct io_ring_ctx *ctx = req->ctx;
173 	struct io_buffer_list *bl;
174 	void __user *ret = NULL;
175 
176 	io_ring_submit_lock(req->ctx, issue_flags);
177 
178 	bl = io_buffer_get_list(ctx, req->buf_index);
179 	if (likely(bl)) {
180 		if (bl->is_mapped)
181 			ret = io_ring_buffer_select(req, len, bl, issue_flags);
182 		else
183 			ret = io_provided_buffer_select(req, len, bl);
184 	}
185 	io_ring_submit_unlock(req->ctx, issue_flags);
186 	return ret;
187 }
188 
189 static int __io_remove_buffers(struct io_ring_ctx *ctx,
190 			       struct io_buffer_list *bl, unsigned nbufs)
191 {
192 	unsigned i = 0;
193 
194 	/* shouldn't happen */
195 	if (!nbufs)
196 		return 0;
197 
198 	if (bl->is_mapped) {
199 		i = bl->buf_ring->tail - bl->head;
200 		if (bl->buf_nr_pages) {
201 			int j;
202 
203 			if (!bl->is_mmap) {
204 				for (j = 0; j < bl->buf_nr_pages; j++)
205 					unpin_user_page(bl->buf_pages[j]);
206 			}
207 			io_pages_unmap(bl->buf_ring, &bl->buf_pages,
208 					&bl->buf_nr_pages, bl->is_mmap);
209 			bl->is_mmap = 0;
210 		}
211 		/* make sure it's seen as empty */
212 		INIT_LIST_HEAD(&bl->buf_list);
213 		bl->is_mapped = 0;
214 		return i;
215 	}
216 
217 	/* protects io_buffers_cache */
218 	lockdep_assert_held(&ctx->uring_lock);
219 
220 	while (!list_empty(&bl->buf_list)) {
221 		struct io_buffer *nxt;
222 
223 		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
224 		list_move(&nxt->list, &ctx->io_buffers_cache);
225 		if (++i == nbufs)
226 			return i;
227 		cond_resched();
228 	}
229 
230 	return i;
231 }
232 
233 void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
234 {
235 	if (atomic_dec_and_test(&bl->refs)) {
236 		__io_remove_buffers(ctx, bl, -1U);
237 		kfree_rcu(bl, rcu);
238 	}
239 }
240 
241 void io_destroy_buffers(struct io_ring_ctx *ctx)
242 {
243 	struct io_buffer_list *bl;
244 	unsigned long index;
245 
246 	xa_for_each(&ctx->io_bl_xa, index, bl) {
247 		xa_erase(&ctx->io_bl_xa, bl->bgid);
248 		io_put_bl(ctx, bl);
249 	}
250 
251 	while (!list_empty(&ctx->io_buffers_pages)) {
252 		struct page *page;
253 
254 		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
255 		list_del_init(&page->lru);
256 		__free_page(page);
257 	}
258 }
259 
260 static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
261 {
262 	xa_erase(&ctx->io_bl_xa, bl->bgid);
263 	io_put_bl(ctx, bl);
264 }
265 
266 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
267 {
268 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
269 	u64 tmp;
270 
271 	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
272 	    sqe->splice_fd_in)
273 		return -EINVAL;
274 
275 	tmp = READ_ONCE(sqe->fd);
276 	if (!tmp || tmp > MAX_BIDS_PER_BGID)
277 		return -EINVAL;
278 
279 	memset(p, 0, sizeof(*p));
280 	p->nbufs = tmp;
281 	p->bgid = READ_ONCE(sqe->buf_group);
282 	return 0;
283 }
284 
285 int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
286 {
287 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
288 	struct io_ring_ctx *ctx = req->ctx;
289 	struct io_buffer_list *bl;
290 	int ret = 0;
291 
292 	io_ring_submit_lock(ctx, issue_flags);
293 
294 	ret = -ENOENT;
295 	bl = io_buffer_get_list(ctx, p->bgid);
296 	if (bl) {
297 		ret = -EINVAL;
298 		/* can't use provide/remove buffers command on mapped buffers */
299 		if (!bl->is_mapped)
300 			ret = __io_remove_buffers(ctx, bl, p->nbufs);
301 	}
302 	io_ring_submit_unlock(ctx, issue_flags);
303 	if (ret < 0)
304 		req_set_fail(req);
305 	io_req_set_res(req, ret, 0);
306 	return IOU_OK;
307 }
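
/*
 * A minimal sketch of driving this opcode from userspace via liburing,
 * assuming an initialized struct io_uring "ring" and that group "bgid"
 * currently holds classic (non-ring) provided buffers:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_remove_buffers(sqe, nbufs, bgid);
 *	io_uring_submit(&ring);
 *	// cqe->res is the number of buffers removed, or a negative error
 */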
308 
309 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
310 {
311 	unsigned long size, tmp_check;
312 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
313 	u64 tmp;
314 
315 	if (sqe->rw_flags || sqe->splice_fd_in)
316 		return -EINVAL;
317 
318 	tmp = READ_ONCE(sqe->fd);
319 	if (!tmp || tmp > MAX_BIDS_PER_BGID)
320 		return -E2BIG;
321 	p->nbufs = tmp;
322 	p->addr = READ_ONCE(sqe->addr);
323 	p->len = READ_ONCE(sqe->len);
324 
325 	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
326 				&size))
327 		return -EOVERFLOW;
328 	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
329 		return -EOVERFLOW;
330 
331 	size = (unsigned long)p->len * p->nbufs;
332 	if (!access_ok(u64_to_user_ptr(p->addr), size))
333 		return -EFAULT;
334 
335 	p->bgid = READ_ONCE(sqe->buf_group);
336 	tmp = READ_ONCE(sqe->off);
337 	if (tmp > USHRT_MAX)
338 		return -E2BIG;
339 	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
340 		return -EINVAL;
341 	p->bid = tmp;
342 	return 0;
343 }
344 
345 static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
346 {
347 	struct io_buffer *buf;
348 	struct page *page;
349 	int bufs_in_page;
350 
351 	/*
352 	 * Completions that don't happen inline (eg not under uring_lock) will
353 	 * add to ->io_buffers_comp. If we don't have any free buffers, check
354 	 * the completion list and splice those entries first.
355 	 */
356 	if (!list_empty_careful(&ctx->io_buffers_comp)) {
357 		spin_lock(&ctx->completion_lock);
358 		if (!list_empty(&ctx->io_buffers_comp)) {
359 			list_splice_init(&ctx->io_buffers_comp,
360 						&ctx->io_buffers_cache);
361 			spin_unlock(&ctx->completion_lock);
362 			return 0;
363 		}
364 		spin_unlock(&ctx->completion_lock);
365 	}
366 
367 	/*
368 	 * No free buffers and no completion entries either. Allocate a new
369 	 * page worth of buffer entries and add those to our freelist.
370 	 */
371 	page = alloc_page(GFP_KERNEL_ACCOUNT);
372 	if (!page)
373 		return -ENOMEM;
374 
375 	list_add(&page->lru, &ctx->io_buffers_pages);
376 
377 	buf = page_address(page);
378 	bufs_in_page = PAGE_SIZE / sizeof(*buf);
379 	while (bufs_in_page) {
380 		list_add_tail(&buf->list, &ctx->io_buffers_cache);
381 		buf++;
382 		bufs_in_page--;
383 	}
384 
385 	return 0;
386 }
387 
388 static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
389 			  struct io_buffer_list *bl)
390 {
391 	struct io_buffer *buf;
392 	u64 addr = pbuf->addr;
393 	int i, bid = pbuf->bid;
394 
395 	for (i = 0; i < pbuf->nbufs; i++) {
396 		if (list_empty(&ctx->io_buffers_cache) &&
397 		    io_refill_buffer_cache(ctx))
398 			break;
399 		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
400 					list);
401 		list_move_tail(&buf->list, &bl->buf_list);
402 		buf->addr = addr;
403 		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
404 		buf->bid = bid;
405 		buf->bgid = pbuf->bgid;
406 		addr += pbuf->len;
407 		bid++;
408 		cond_resched();
409 	}
410 
411 	return i ? 0 : -ENOMEM;
412 }
413 
414 int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
415 {
416 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
417 	struct io_ring_ctx *ctx = req->ctx;
418 	struct io_buffer_list *bl;
419 	int ret = 0;
420 
421 	io_ring_submit_lock(ctx, issue_flags);
422 
423 	bl = io_buffer_get_list(ctx, p->bgid);
424 	if (unlikely(!bl)) {
425 		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
426 		if (!bl) {
427 			ret = -ENOMEM;
428 			goto err;
429 		}
430 		INIT_LIST_HEAD(&bl->buf_list);
431 		ret = io_buffer_add_list(ctx, bl, p->bgid);
432 		if (ret) {
433 			/*
434 			 * Doesn't need rcu free as it was never visible, but
435 			 * let's keep it consistent throughout.
436 			 */
437 			kfree_rcu(bl, rcu);
438 			goto err;
439 		}
440 	}
441 	/* can't add buffers via this command for a mapped buffer ring */
442 	if (bl->is_mapped) {
443 		ret = -EINVAL;
444 		goto err;
445 	}
446 
447 	ret = io_add_buffers(ctx, p, bl);
448 err:
449 	io_ring_submit_unlock(ctx, issue_flags);
450 
451 	if (ret < 0)
452 		req_set_fail(req);
453 	io_req_set_res(req, ret, 0);
454 	return IOU_OK;
455 }
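
/*
 * A minimal sketch of the userspace half of this opcode via liburing,
 * assuming an initialized struct io_uring "ring"; "nbufs" chunks of
 * "buf_len" bytes starting at "base" become buffer IDs bid..bid+nbufs-1
 * in group "bgid":
 *
 *	void *base = malloc((size_t) nbufs * buf_len);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, base, buf_len, nbufs, bgid, bid);
 *	io_uring_submit(&ring);
 *
 * Requests then opt in with IOSQE_BUFFER_SELECT and sqe->buf_group = bgid,
 * and the selected buffer ID comes back in cqe->flags as described above.
 */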
456 
457 static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
458 			    struct io_buffer_list *bl)
459 {
460 	struct io_uring_buf_ring *br = NULL;
461 	struct page **pages;
462 	int nr_pages, ret;
463 
464 	pages = io_pin_pages(reg->ring_addr,
465 			     flex_array_size(br, bufs, reg->ring_entries),
466 			     &nr_pages);
467 	if (IS_ERR(pages))
468 		return PTR_ERR(pages);
469 
470 	br = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
471 	if (!br) {
472 		ret = -ENOMEM;
473 		goto error_unpin;
474 	}
475 
476 #ifdef SHM_COLOUR
477 	/*
478 	 * On platforms that have specific aliasing requirements, SHM_COLOUR
479 	 * is set and we must guarantee that the kernel and user side align
480 	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
481 	 * the application mmap's the provided ring buffer. Fail the request
482 	 * if we, by chance, don't end up with aligned addresses. The app
483 	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
484 	 * this transparently.
485 	 */
486 	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
487 		ret = -EINVAL;
488 		goto error_unpin;
489 	}
490 #endif
491 	bl->buf_pages = pages;
492 	bl->buf_nr_pages = nr_pages;
493 	bl->buf_ring = br;
494 	bl->is_mapped = 1;
495 	bl->is_mmap = 0;
496 	return 0;
497 error_unpin:
498 	unpin_user_pages(pages, nr_pages);
499 	kvfree(pages);
500 	vunmap(br);
501 	return ret;
502 }
503 
504 static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
505 			      struct io_uring_buf_reg *reg,
506 			      struct io_buffer_list *bl)
507 {
508 	size_t ring_size;
509 
510 	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
511 
512 	bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size);
513 	if (IS_ERR(bl->buf_ring)) {
514 		bl->buf_ring = NULL;
515 		return -ENOMEM;
516 	}
517 	bl->is_mapped = 1;
518 	bl->is_mmap = 1;
519 	return 0;
520 }
521 
522 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
523 {
524 	struct io_uring_buf_reg reg;
525 	struct io_buffer_list *bl, *free_bl = NULL;
526 	int ret;
527 
528 	lockdep_assert_held(&ctx->uring_lock);
529 
530 	if (copy_from_user(&reg, arg, sizeof(reg)))
531 		return -EFAULT;
532 
533 	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
534 		return -EINVAL;
535 	if (reg.flags & ~IOU_PBUF_RING_MMAP)
536 		return -EINVAL;
537 	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
538 		if (!reg.ring_addr)
539 			return -EFAULT;
540 		if (reg.ring_addr & ~PAGE_MASK)
541 			return -EINVAL;
542 	} else {
543 		if (reg.ring_addr)
544 			return -EINVAL;
545 	}
546 
547 	if (!is_power_of_2(reg.ring_entries))
548 		return -EINVAL;
549 
550 	/* cannot disambiguate full vs empty due to head/tail size */
551 	if (reg.ring_entries >= 65536)
552 		return -EINVAL;
553 
554 	bl = io_buffer_get_list(ctx, reg.bgid);
555 	if (bl) {
556 		/* if mapped buffer ring OR classic exists, don't allow */
557 		if (bl->is_mapped || !list_empty(&bl->buf_list))
558 			return -EEXIST;
559 		io_destroy_bl(ctx, bl);
560 	}
561 
562 	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
563 	if (!bl)
564 		return -ENOMEM;
565 
566 	if (!(reg.flags & IOU_PBUF_RING_MMAP))
567 		ret = io_pin_pbuf_ring(&reg, bl);
568 	else
569 		ret = io_alloc_pbuf_ring(ctx, &reg, bl);
570 
571 	if (!ret) {
572 		bl->nr_entries = reg.ring_entries;
573 		bl->mask = reg.ring_entries - 1;
574 
575 		io_buffer_add_list(ctx, bl, reg.bgid);
576 		return 0;
577 	}
578 
579 	kfree_rcu(free_bl, rcu);
580 	return ret;
581 }
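
/*
 * A minimal sketch of the registration this implements, from userspace via
 * liburing and assuming an initialized struct io_uring "ring". Without
 * IOU_PBUF_RING_MMAP the application allocates the ring itself, so
 * ring_addr must be page aligned and ring_entries a power of two below
 * 65536, exactly as validated above (a 4 KiB page size is assumed here):
 *
 *	struct io_uring_buf_reg reg = { };
 *	struct io_uring_buf_ring *br;
 *	int ret;
 *
 *	if (posix_memalign((void **) &br, 4096,
 *			   nentries * sizeof(struct io_uring_buf)))
 *		return -ENOMEM;
 *
 *	reg.ring_addr = (unsigned long) br;
 *	reg.ring_entries = nentries;
 *	reg.bgid = bgid;
 *	ret = io_uring_register_buf_ring(&ring, &reg, 0);
 *	if (!ret)
 *		io_uring_buf_ring_init(br);
 *
 * Newer liburing wraps the whole sequence in io_uring_setup_buf_ring().
 */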
582 
583 int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
584 {
585 	struct io_uring_buf_reg reg;
586 	struct io_buffer_list *bl;
587 
588 	lockdep_assert_held(&ctx->uring_lock);
589 
590 	if (copy_from_user(&reg, arg, sizeof(reg)))
591 		return -EFAULT;
592 	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
593 		return -EINVAL;
594 	if (reg.flags)
595 		return -EINVAL;
596 
597 	bl = io_buffer_get_list(ctx, reg.bgid);
598 	if (!bl)
599 		return -ENOENT;
600 	if (!bl->is_mapped)
601 		return -EINVAL;
602 
603 	xa_erase(&ctx->io_bl_xa, bl->bgid);
604 	io_put_bl(ctx, bl);
605 	return 0;
606 }
607 
608 struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
609 				      unsigned long bgid)
610 {
611 	struct io_buffer_list *bl;
612 	bool ret;
613 
614 	/*
615 	 * We have to be a bit careful here - we're inside mmap and cannot grab
616 	 * the uring_lock. This means the buffer_list could be simultaneously
617 	 * going away, if someone is trying to be sneaky. Look it up under rcu
618 	 * so we know it's not going away, and attempt to grab a reference to
619 	 * it. If the ref is already zero, then fail the mapping. If successful,
620 	 * the caller will call io_put_bl() to drop the reference at the
621 	 * end. This may then safely free the buffer_list (and drop the pages)
622 	 * at that point, vm_insert_pages() would've already grabbed the
623 	 * necessary vma references.
624 	 */
625 	rcu_read_lock();
626 	bl = xa_load(&ctx->io_bl_xa, bgid);
627 	/* must be a mmap'able buffer ring and have pages */
628 	ret = false;
629 	if (bl && bl->is_mmap)
630 		ret = atomic_inc_not_zero(&bl->refs);
631 	rcu_read_unlock();
632 
633 	if (ret)
634 		return bl;
635 
636 	return ERR_PTR(-EINVAL);
637 }
638 
639 int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
640 {
641 	struct io_ring_ctx *ctx = file->private_data;
642 	loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
643 	struct io_buffer_list *bl;
644 	int bgid, ret;
645 
646 	bgid = (pgoff & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
647 	bl = io_pbuf_get_bl(ctx, bgid);
648 	if (IS_ERR(bl))
649 		return PTR_ERR(bl);
650 
651 	ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
652 	io_put_bl(ctx, bl);
653 	return ret;
654 }
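
/*
 * The userspace counterpart of this handler, for rings registered with
 * IOU_PBUF_RING_MMAP: the mmap offset encodes the buffer group ID with the
 * same uapi constants decoded above. A minimal sketch, assuming "ring_fd"
 * is the io_uring fd, "bgid" is already registered, "nentries" matches the
 * registration, and off_t is 64 bits wide:
 *
 *	#include <sys/mman.h>
 *	#include <linux/io_uring.h>
 *
 *	off_t off = IORING_OFF_PBUF_RING |
 *		    ((unsigned long long) bgid << IORING_OFF_PBUF_SHIFT);
 *	size_t len = nentries * sizeof(struct io_uring_buf);
 *	struct io_uring_buf_ring *br;
 *
 *	br = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED | MAP_POPULATE, ring_fd, off);
 */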
655