// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

struct io_provide_buf {
        struct file *file;
        __u64 addr;
        __u32 len;
        __u32 bgid;
        __u32 nbufs;
        __u16 bid;
};

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
                                                        unsigned int bgid)
{
        lockdep_assert_held(&ctx->uring_lock);

        return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
                              struct io_buffer_list *bl, unsigned int bgid)
{
        /*
         * Store buffer group ID and finally mark the list as visible.
         * The normal lookup doesn't care about the visibility as we're
         * always under the ->uring_lock, but the RCU lookup from mmap does.
         */
        bl->bgid = bgid;
        atomic_set(&bl->refs, 1);
        return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

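/*
 * Return a legacy provided buffer to its group so it can be selected again.
 * A buffer that already saw partial IO (REQ_F_PARTIAL_IO) is not recycled,
 * per the reasoning in the comment below.
 */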
void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
        struct io_buffer *buf;

        /*
         * For legacy provided buffer mode, don't recycle if we already did
         * IO to this buffer. For ring-mapped provided buffer mode, we should
         * increment ring->head to explicitly monopolize the buffer to avoid
         * multiple use.
         */
        if (req->flags & REQ_F_PARTIAL_IO)
                return;

        io_ring_submit_lock(ctx, issue_flags);

        buf = req->kbuf;
        bl = io_buffer_get_list(ctx, buf->bgid);
        list_add(&buf->list, &bl->buf_list);
        req->flags &= ~REQ_F_BUFFER_SELECTED;
        req->buf_index = buf->bgid;

        io_ring_submit_unlock(ctx, issue_flags);
        return;
}

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
        unsigned int cflags;

        /*
         * We can add this buffer back to two lists:
         *
         * 1) The io_buffers_cache list. This one is protected by the
         *    ctx->uring_lock. If we already hold this lock, add back to this
         *    list as we can grab it from issue as well.
         * 2) The io_buffers_comp list. This one is protected by the
         *    ctx->completion_lock.
         *
         * We migrate buffers from the comp_list to the issue cache list
         * when we need one.
         */
        if (req->flags & REQ_F_BUFFER_RING) {
                /* no buffers to recycle for this case */
                cflags = __io_put_kbuf_list(req, NULL);
        } else if (issue_flags & IO_URING_F_UNLOCKED) {
                struct io_ring_ctx *ctx = req->ctx;

                spin_lock(&ctx->completion_lock);
                cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
                spin_unlock(&ctx->completion_lock);
        } else {
                lockdep_assert_held(&req->ctx->uring_lock);

                cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
        }
        return cflags;
}

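/*
 * Pick the first buffer from a legacy (IORING_OP_PROVIDE_BUFFERS) group.
 * The buffer is removed from the group list and attached to the request,
 * and *len is clamped to the buffer length. Returns the userspace address
 * of the selected buffer, or NULL if the group is empty.
 */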
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
                                              struct io_buffer_list *bl)
{
        if (!list_empty(&bl->buf_list)) {
                struct io_buffer *kbuf;

                kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
                list_del(&kbuf->list);
                if (*len == 0 || *len > kbuf->len)
                        *len = kbuf->len;
                req->flags |= REQ_F_BUFFER_SELECTED;
                req->kbuf = kbuf;
                req->buf_index = kbuf->bid;
                return u64_to_user_ptr(kbuf->addr);
        }
        return NULL;
}

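/*
 * Pick the next buffer from a ring-mapped (registered buffer ring) group.
 * The ring tail is written by userspace, so it is read with
 * smp_load_acquire() and compared against our cached head to detect an
 * empty ring. Returns the userspace address of the buffer, or NULL if no
 * buffers are available.
 */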
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
                                          struct io_buffer_list *bl,
                                          unsigned int issue_flags)
{
        struct io_uring_buf_ring *br = bl->buf_ring;
        struct io_uring_buf *buf;
        __u16 head = bl->head;

        if (unlikely(smp_load_acquire(&br->tail) == head))
                return NULL;

        head &= bl->mask;
        buf = &br->bufs[head];
        if (*len == 0 || *len > buf->len)
                *len = buf->len;
        req->flags |= REQ_F_BUFFER_RING;
        req->buf_list = bl;
        req->buf_index = buf->bid;

        if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
                /*
                 * If we came in unlocked, we have no choice but to consume the
                 * buffer here, otherwise nothing ensures that the buffer won't
                 * get used by others. This does mean it'll be pinned until the
                 * IO completes; coming in unlocked means we're being called
                 * from io-wq context and there may be further retries in async
                 * hybrid mode. For the locked case, the caller must call
                 * commit when the transfer completes (or if we get -EAGAIN and
                 * must poll or retry).
                 */
                req->buf_list = NULL;
                bl->head++;
        }
        return u64_to_user_ptr(buf->addr);
}

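/*
 * Select a buffer for a request from the group identified by
 * req->buf_index, dispatching to the ring-mapped or legacy variant
 * depending on how the group was set up. Takes the submission lock as
 * dictated by issue_flags.
 */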
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
                              unsigned int issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
        void __user *ret = NULL;

        io_ring_submit_lock(req->ctx, issue_flags);

        bl = io_buffer_get_list(ctx, req->buf_index);
        if (likely(bl)) {
                if (bl->is_mapped)
                        ret = io_ring_buffer_select(req, len, bl, issue_flags);
                else
                        ret = io_provided_buffer_select(req, len, bl);
        }
        io_ring_submit_unlock(req->ctx, issue_flags);
        return ret;
}

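/*
 * Tear buffers out of a group. For a ring-mapped group the ring memory is
 * unmapped (and unpinned, if it was user memory) and the number of
 * unconsumed entries is reported. For a legacy group, up to @nbufs buffers
 * are moved back to the ctx-wide io_buffers_cache. Returns the number of
 * buffers removed.
 */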
static int __io_remove_buffers(struct io_ring_ctx *ctx,
                               struct io_buffer_list *bl, unsigned nbufs)
{
        unsigned i = 0;

        /* shouldn't happen */
        if (!nbufs)
                return 0;

        if (bl->is_mapped) {
                i = bl->buf_ring->tail - bl->head;
                if (bl->buf_nr_pages) {
                        int j;

                        if (!bl->is_mmap) {
                                for (j = 0; j < bl->buf_nr_pages; j++)
                                        unpin_user_page(bl->buf_pages[j]);
                        }
                        io_pages_unmap(bl->buf_ring, &bl->buf_pages,
                                       &bl->buf_nr_pages, bl->is_mmap);
                        bl->is_mmap = 0;
                }
                /* make sure it's seen as empty */
                INIT_LIST_HEAD(&bl->buf_list);
                bl->is_mapped = 0;
                return i;
        }

        /* protects io_buffers_cache */
        lockdep_assert_held(&ctx->uring_lock);

        while (!list_empty(&bl->buf_list)) {
                struct io_buffer *nxt;

                nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
                list_move(&nxt->list, &ctx->io_buffers_cache);
                if (++i == nbufs)
                        return i;
                cond_resched();
        }

        return i;
}

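/*
 * Drop a reference on a buffer list. The final reference tears down any
 * buffers still attached and frees the list via RCU; the grace period
 * keeps the lockless mmap-side lookup in io_pbuf_get_bl() safe.
 */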
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
        if (atomic_dec_and_test(&bl->refs)) {
                __io_remove_buffers(ctx, bl, -1U);
                kfree_rcu(bl, rcu);
        }
}

void io_destroy_buffers(struct io_ring_ctx *ctx)
{
        struct io_buffer_list *bl;
        unsigned long index;

        xa_for_each(&ctx->io_bl_xa, index, bl) {
                xa_erase(&ctx->io_bl_xa, bl->bgid);
                io_put_bl(ctx, bl);
        }

        while (!list_empty(&ctx->io_buffers_pages)) {
                struct page *page;

                page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
                list_del_init(&page->lru);
                __free_page(page);
        }
}

static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
        xa_erase(&ctx->io_bl_xa, bl->bgid);
        io_put_bl(ctx, bl);
}

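/*
 * Prep for IORING_OP_REMOVE_BUFFERS: the number of buffers to remove is
 * carried in sqe->fd and the buffer group ID in sqe->buf_group; the other
 * SQE fields must be zero.
 */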
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
        u64 tmp;

        if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
            sqe->splice_fd_in)
                return -EINVAL;

        tmp = READ_ONCE(sqe->fd);
        if (!tmp || tmp > MAX_BIDS_PER_BGID)
                return -EINVAL;

        memset(p, 0, sizeof(*p));
        p->nbufs = tmp;
        p->bgid = READ_ONCE(sqe->buf_group);
        return 0;
}

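/*
 * Issue IORING_OP_REMOVE_BUFFERS. Only legacy provided-buffer groups can
 * be trimmed this way; ring-mapped groups are torn down via
 * unregistration instead. The result is the number of buffers removed, or
 * a negative error.
 */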
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
        int ret = 0;

        io_ring_submit_lock(ctx, issue_flags);

        ret = -ENOENT;
        bl = io_buffer_get_list(ctx, p->bgid);
        if (bl) {
                ret = -EINVAL;
                /* can't use provide/remove buffers command on mapped buffers */
                if (!bl->is_mapped)
                        ret = __io_remove_buffers(ctx, bl, p->nbufs);
        }
        io_ring_submit_unlock(ctx, issue_flags);
        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

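/*
 * Prep for IORING_OP_PROVIDE_BUFFERS. The SQE encodes: fd = number of
 * buffers, addr = base address, len = length of each buffer, off = first
 * buffer ID, buf_group = group ID. Overflow of both the address range and
 * the buffer ID space is rejected up front.
 */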
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        unsigned long size, tmp_check;
        struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
        u64 tmp;

        if (sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;

        tmp = READ_ONCE(sqe->fd);
        if (!tmp || tmp > MAX_BIDS_PER_BGID)
                return -E2BIG;
        p->nbufs = tmp;
        p->addr = READ_ONCE(sqe->addr);
        p->len = READ_ONCE(sqe->len);
        if (!p->len)
                return -EINVAL;

        if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
                               &size))
                return -EOVERFLOW;
        if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
                return -EOVERFLOW;

        size = (unsigned long)p->len * p->nbufs;
        if (!access_ok(u64_to_user_ptr(p->addr), size))
                return -EFAULT;

        p->bgid = READ_ONCE(sqe->buf_group);
        tmp = READ_ONCE(sqe->off);
        if (tmp > USHRT_MAX)
                return -E2BIG;
        if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
                return -EINVAL;
        p->bid = tmp;
        return 0;
}

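/*
 * Make sure ctx->io_buffers_cache has at least one free io_buffer entry,
 * either by stealing completed entries back from io_buffers_comp or by
 * allocating a fresh page worth of entries.
 */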
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
        struct io_buffer *buf;
        struct page *page;
        int bufs_in_page;

        /*
         * Completions that don't happen inline (eg not under uring_lock) will
         * add to ->io_buffers_comp. If we don't have any free buffers, check
         * the completion list and splice those entries first.
         */
        if (!list_empty_careful(&ctx->io_buffers_comp)) {
                spin_lock(&ctx->completion_lock);
                if (!list_empty(&ctx->io_buffers_comp)) {
                        list_splice_init(&ctx->io_buffers_comp,
                                         &ctx->io_buffers_cache);
                        spin_unlock(&ctx->completion_lock);
                        return 0;
                }
                spin_unlock(&ctx->completion_lock);
        }

        /*
         * No free buffers and no completion entries either. Allocate a new
         * page worth of buffer entries and add those to our freelist.
         */
        page = alloc_page(GFP_KERNEL_ACCOUNT);
        if (!page)
                return -ENOMEM;

        list_add(&page->lru, &ctx->io_buffers_pages);

        buf = page_address(page);
        bufs_in_page = PAGE_SIZE / sizeof(*buf);
        while (bufs_in_page) {
                list_add_tail(&buf->list, &ctx->io_buffers_cache);
                buf++;
                bufs_in_page--;
        }

        return 0;
}

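/*
 * Link pbuf->nbufs buffers into a legacy group, assigning consecutive
 * buffer IDs starting at pbuf->bid. Stops early if the io_buffer cache
 * cannot be refilled; returns -ENOMEM only if no buffer at all was added.
 */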
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
                          struct io_buffer_list *bl)
{
        struct io_buffer *buf;
        u64 addr = pbuf->addr;
        int i, bid = pbuf->bid;

        for (i = 0; i < pbuf->nbufs; i++) {
                if (list_empty(&ctx->io_buffers_cache) &&
                    io_refill_buffer_cache(ctx))
                        break;
                buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
                                       list);
                list_move_tail(&buf->list, &bl->buf_list);
                buf->addr = addr;
                buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
                buf->bid = bid;
                buf->bgid = pbuf->bgid;
                addr += pbuf->len;
                bid++;
                cond_resched();
        }

        return i ? 0 : -ENOMEM;
}

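/*
 * Issue IORING_OP_PROVIDE_BUFFERS. Creates the buffer group on first use
 * and then adds the requested buffers to it. Fails with -EINVAL if the
 * group ID is already taken by a ring-mapped buffer ring.
 */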
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
        int ret = 0;

        io_ring_submit_lock(ctx, issue_flags);

        bl = io_buffer_get_list(ctx, p->bgid);
        if (unlikely(!bl)) {
                bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
                if (!bl) {
                        ret = -ENOMEM;
                        goto err;
                }
                INIT_LIST_HEAD(&bl->buf_list);
                ret = io_buffer_add_list(ctx, bl, p->bgid);
                if (ret) {
                        /*
                         * Doesn't need rcu free as it was never visible, but
                         * let's keep it consistent throughout.
                         */
                        kfree_rcu(bl, rcu);
                        goto err;
                }
        }
        /* can't add buffers via this command for a mapped buffer ring */
        if (bl->is_mapped) {
                ret = -EINVAL;
                goto err;
        }

        ret = io_add_buffers(ctx, p, bl);
err:
        io_ring_submit_unlock(ctx, issue_flags);

        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

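/*
 * Map a userspace-allocated buffer ring: pin the user pages backing it and
 * vmap() them so the kernel has a contiguous view of the ring.
 */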
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
                            struct io_buffer_list *bl)
{
        struct io_uring_buf_ring *br = NULL;
        struct page **pages;
        int nr_pages, ret;

        pages = io_pin_pages(reg->ring_addr,
                             flex_array_size(br, bufs, reg->ring_entries),
                             &nr_pages);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        br = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
        if (!br) {
                ret = -ENOMEM;
                goto error_unpin;
        }

#ifdef SHM_COLOUR
        /*
         * On platforms that have specific aliasing requirements, SHM_COLOUR
         * is set and we must guarantee that the kernel and user side align
         * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
         * the application mmap's the provided ring buffer. Fail the request
         * if we, by chance, don't end up with aligned addresses. The app
         * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
         * this transparently.
         */
        if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
                ret = -EINVAL;
                goto error_unpin;
        }
#endif
        bl->buf_pages = pages;
        bl->buf_nr_pages = nr_pages;
        bl->buf_ring = br;
        bl->is_mapped = 1;
        bl->is_mmap = 0;
        return 0;
error_unpin:
        unpin_user_pages(pages, nr_pages);
        kvfree(pages);
        vunmap(br);
        return ret;
}

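/*
 * Allocate the buffer ring in the kernel (IOU_PBUF_RING_MMAP): the
 * application gets at it later by mmap'ing the ring fd at the pbuf offset
 * for this group, see io_pbuf_mmap().
 */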
static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
                              struct io_uring_buf_reg *reg,
                              struct io_buffer_list *bl)
{
        size_t ring_size;

        ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);

        bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size);
        if (IS_ERR(bl->buf_ring)) {
                bl->buf_ring = NULL;
                return -ENOMEM;
        }
        bl->is_mapped = 1;
        bl->is_mmap = 1;
        return 0;
}

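/*
 * Handle IORING_REGISTER_PBUF_RING: register a ring of provided buffers
 * for a buffer group, either pinning user memory or (with
 * IOU_PBUF_RING_MMAP) allocating it in the kernel for the application to
 * mmap. ring_entries must be a power of two below 65536.
 *
 * Roughly, userspace is expected to do something like the following
 * (a hedged sketch of the raw interface; liburing's
 * io_uring_register_buf_ring() wraps this):
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring_mem,	// page aligned
 *		.ring_entries	= 8,				// power of two
 *		.bgid		= 1,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 */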
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
        struct io_uring_buf_reg reg;
        struct io_buffer_list *bl, *free_bl = NULL;
        int ret;

        lockdep_assert_held(&ctx->uring_lock);

        if (copy_from_user(&reg, arg, sizeof(reg)))
                return -EFAULT;

        if (reg.resv[0] || reg.resv[1] || reg.resv[2])
                return -EINVAL;
        if (reg.flags & ~IOU_PBUF_RING_MMAP)
                return -EINVAL;
        if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
                if (!reg.ring_addr)
                        return -EFAULT;
                if (reg.ring_addr & ~PAGE_MASK)
                        return -EINVAL;
        } else {
                if (reg.ring_addr)
                        return -EINVAL;
        }

        if (!is_power_of_2(reg.ring_entries))
                return -EINVAL;

        /* cannot disambiguate full vs empty due to head/tail size */
        if (reg.ring_entries >= 65536)
                return -EINVAL;

        bl = io_buffer_get_list(ctx, reg.bgid);
        if (bl) {
                /* if mapped buffer ring OR classic exists, don't allow */
                if (bl->is_mapped || !list_empty(&bl->buf_list))
                        return -EEXIST;
                io_destroy_bl(ctx, bl);
        }

        free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
        if (!bl)
                return -ENOMEM;

        if (!(reg.flags & IOU_PBUF_RING_MMAP))
                ret = io_pin_pbuf_ring(&reg, bl);
        else
                ret = io_alloc_pbuf_ring(ctx, &reg, bl);

        if (!ret) {
                bl->nr_entries = reg.ring_entries;
                bl->mask = reg.ring_entries - 1;

                io_buffer_add_list(ctx, bl, reg.bgid);
                return 0;
        }

        kfree_rcu(free_bl, rcu);
        return ret;
}

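/*
 * Handle IORING_UNREGISTER_PBUF_RING: drop a previously registered buffer
 * ring for the given group ID. Legacy provided-buffer groups cannot be
 * removed this way.
 */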
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
        struct io_uring_buf_reg reg;
        struct io_buffer_list *bl;

        lockdep_assert_held(&ctx->uring_lock);

        if (copy_from_user(&reg, arg, sizeof(reg)))
                return -EFAULT;
        if (reg.resv[0] || reg.resv[1] || reg.resv[2])
                return -EINVAL;
        if (reg.flags)
                return -EINVAL;

        bl = io_buffer_get_list(ctx, reg.bgid);
        if (!bl)
                return -ENOENT;
        if (!bl->is_mapped)
                return -EINVAL;

        xa_erase(&ctx->io_bl_xa, bl->bgid);
        io_put_bl(ctx, bl);
        return 0;
}

struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
                                      unsigned long bgid)
{
        struct io_buffer_list *bl;
        bool ret;

        /*
         * We have to be a bit careful here - we're inside mmap and cannot grab
         * the uring_lock. This means the buffer_list could be simultaneously
         * going away, if someone is trying to be sneaky. Look it up under rcu
         * so we know it's not going away, and attempt to grab a reference to
         * it. If the ref is already zero, then fail the mapping. If successful,
         * the caller will call io_put_bl() to drop the reference at the end.
         * This may then safely free the buffer_list (and drop the pages) at
         * that point; vm_insert_pages() would've already grabbed the
         * necessary vma references.
         */
        rcu_read_lock();
        bl = xa_load(&ctx->io_bl_xa, bgid);
        /* must be a mmap'able buffer ring and have pages */
        ret = false;
        if (bl && bl->is_mmap)
                ret = atomic_inc_not_zero(&bl->refs);
        rcu_read_unlock();

        if (ret)
                return bl;

        return ERR_PTR(-EINVAL);
}

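/*
 * mmap() handler for kernel-allocated buffer rings. The buffer group ID is
 * encoded in the mmap offset; look the ring up under RCU with a reference
 * held and insert its pages into the vma.
 */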
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct io_ring_ctx *ctx = file->private_data;
        loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
        struct io_buffer_list *bl;
        int bgid, ret;

        bgid = (pgoff & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
        bl = io_pbuf_get_bl(ctx, bgid);
        if (IS_ERR(bl))
                return PTR_ERR(bl);

        ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
        io_put_bl(ctx, bl);
        return ret;
}