// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
							   unsigned int bgid)
{
	return xa_load(&ctx->io_bl_xa, bgid);
}

struct io_buf_free {
	struct hlist_node		list;
	void				*mem;
	size_t				size;
	int				inuse;
};

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							 unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return __io_buffer_get_list(ctx, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	atomic_set(&bl->refs, 1);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

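/*
 * Return an unconsumed legacy provided buffer to its buffer group list so a
 * later request can select it again. Requests that already transferred data
 * into the buffer (REQ_F_PARTIAL_IO) keep it and do not recycle.
 */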
void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return;
}

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}

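/*
 * Select a buffer from a legacy (non ring-mapped) provided buffer group:
 * pop the first entry off the list, cap the caller's length at the buffer
 * size, and hand back the user address. Returns NULL if the group is empty.
 */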
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

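/*
 * Select a buffer from a ring-mapped buffer group. The application publishes
 * buffers by advancing the ring tail; the kernel consumes them from the head,
 * reading the tail with acquire semantics. If the request may have to be
 * retried later (unlocked issue, or a file that can't poll), the buffer is
 * consumed right away by bumping bl->head; otherwise the commit is deferred
 * to the caller once the transfer completes.
 */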
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	/* mmaped buffers are always contig */
	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED ||
	    (req->file && !file_can_poll(req->file))) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes; coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}

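/*
 * Main entry point for picking a provided buffer for a request submitted with
 * IOSQE_BUFFER_SELECT. At this point req->buf_index holds the buffer group ID;
 * on success it is replaced with the selected buffer ID. Dispatches to the
 * ring-mapped or legacy selection path as appropriate.
 */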
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_mapped)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/*
 * Mark the given mapped range as free for reuse
 */
static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	struct io_buf_free *ibf;

	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
		if (bl->buf_ring == ibf->mem) {
			ibf->inuse = 0;
			return;
		}
	}

	/* can't happen... */
	WARN_ON_ONCE(1);
}

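/*
 * Tear down up to @nbufs buffers from a buffer group. Ring-mapped groups are
 * torn down wholesale (unpinning user pages, or marking the kernel-allocated
 * region free for reuse); legacy buffers are moved back to the ctx-wide
 * io_buffers_cache. Returns the number of buffers accounted as removed.
 */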
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_mapped) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->is_mmap) {
			/*
			 * io_kbuf_list_free() will free the page(s) at
			 * ->release() time.
			 */
			io_kbuf_mark_free(ctx, bl);
			bl->buf_ring = NULL;
			bl->is_mmap = 0;
		} else if (bl->buf_nr_pages) {
			int j;

			for (j = 0; j < bl->buf_nr_pages; j++)
				unpin_user_page(bl->buf_pages[j]);
			kvfree(bl->buf_pages);
			bl->buf_pages = NULL;
			bl->buf_nr_pages = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_mapped = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	if (atomic_dec_and_test(&bl->refs)) {
		__io_remove_buffers(ctx, bl, -1U);
		kfree_rcu(bl, rcu);
	}
}

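/*
 * Ring teardown: drop every registered buffer group, then release the pages
 * backing the legacy provided-buffer cache.
 */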
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		io_put_bl(ctx, bl);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}

static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
}

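/*
 * IORING_OP_REMOVE_BUFFERS: sqe->fd carries the number of buffers to remove
 * and sqe->buf_group the buffer group ID. All other fields must be zero.
 */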
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_mapped)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

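/*
 * IORING_OP_PROVIDE_BUFFERS: sqe->fd carries the number of buffers, sqe->addr
 * the base address, sqe->len the per-buffer length, sqe->buf_group the group
 * ID, and sqe->off the starting buffer ID. The address range is checked for
 * overflow and basic access before any buffers are added.
 */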
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

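/*
 * Refill the legacy buffer cache: first try to splice entries that completed
 * out-of-line (sitting on ->io_buffers_comp), otherwise allocate a fresh page
 * and carve it into io_buffer entries for the free list.
 */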
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}

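/*
 * Link pbuf->nbufs buffers into the buffer group list, assigning consecutive
 * buffer IDs starting at pbuf->bid. Stops early if the cache can't be
 * refilled; succeeds as long as at least one buffer was added.
 */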
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout.
			 */
			kfree_rcu(bl, rcu);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_mapped) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

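/*
 * IORING_REGISTER_PBUF_RING without IOU_PBUF_RING_MMAP: the application
 * allocated the ring itself, so pin its pages and use them directly. Highmem
 * pages (and misaligned mappings on SHM_COLOUR platforms) are rejected.
 */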
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br;
	struct page **pages;
	int i, nr_pages;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Apparently some 32-bit boxes (ARM) will return highmem pages,
	 * which then need to be mapped. We could support that, but it'd
	 * complicate the code and slow down the common cases quite a bit.
	 * So just error out, returning -EINVAL just like we did on kernels
	 * that didn't support mapped buffer rings.
	 */
	for (i = 0; i < nr_pages; i++)
		if (PageHighMem(pages[i]))
			goto error_unpin;

	br = page_address(pages[0]);
#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1))
		goto error_unpin;
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_mapped = 1;
	bl->is_mmap = 0;
	return 0;
error_unpin:
	for (i = 0; i < nr_pages; i++)
		unpin_user_page(pages[i]);
	kvfree(pages);
	return -EINVAL;
}

/*
 * See if we have a suitable region that we can reuse, rather than allocate
 * both a new io_buf_free and mem region again. We leave it on the list as
 * even a reused entry will need freeing at ring release.
 */
static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
						    size_t ring_size)
{
	struct io_buf_free *ibf, *best = NULL;
	size_t best_dist;

	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
		size_t dist;

		if (ibf->inuse || ibf->size < ring_size)
			continue;
		dist = ibf->size - ring_size;
		if (!best || dist < best_dist) {
			best = ibf;
			if (!dist)
				break;
			best_dist = dist;
		}
	}

	return best;
}

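/*
 * IORING_REGISTER_PBUF_RING with IOU_PBUF_RING_MMAP: the kernel allocates the
 * ring memory and the application mmap()s it afterwards. A previously used
 * region is reused when a large enough one is free, since the backing memory
 * is only released at ring exit.
 */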
static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
			      struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	struct io_buf_free *ibf;
	size_t ring_size;
	void *ptr;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);

	/* Reuse existing entry, if we can */
	ibf = io_lookup_buf_free_entry(ctx, ring_size);
	if (!ibf) {
		ptr = io_mem_alloc(ring_size);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		/* Allocate and store deferred free entry */
		ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
		if (!ibf) {
			io_mem_free(ptr);
			return -ENOMEM;
		}
		ibf->mem = ptr;
		ibf->size = ring_size;
		hlist_add_head(&ibf->list, &ctx->io_buf_list);
	}
	ibf->inuse = 1;
	bl->buf_ring = ibf->mem;
	bl->is_mapped = 1;
	bl->is_mmap = 1;
	return 0;
}

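/*
 * Register a ring-mapped provided buffer group. Validates the application's
 * io_uring_buf_reg, requires a power-of-2 entry count below 64k, and refuses
 * to replace an existing group that is mapped or still holds classic buffers.
 * The ring memory is either pinned from userspace or allocated by the kernel,
 * depending on IOU_PBUF_RING_MMAP.
 */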
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_mapped || !list_empty(&bl->buf_list))
			return -EEXIST;
		io_destroy_bl(ctx, bl);
	}

	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
	if (!bl)
		return -ENOMEM;

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;
}

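/*
 * Unregister a ring-mapped provided buffer group and drop the ctx reference
 * to it. Only groups registered via IORING_REGISTER_PBUF_RING can be removed
 * this way; classic groups use IORING_OP_REMOVE_BUFFERS instead.
 */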
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_mapped)
		return -EINVAL;

	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
	return 0;
}

struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid)
{
	struct io_buffer_list *bl;
	bool ret;

	/*
	 * We have to be a bit careful here - we're inside mmap and cannot grab
	 * the uring_lock. This means the buffer_list could be simultaneously
	 * going away, if someone is trying to be sneaky. Look it up under rcu
	 * so we know it's not going away, and attempt to grab a reference to
	 * it. If the ref is already zero, then fail the mapping. If successful,
	 * the caller will call io_put_bl() to drop the reference at the end.
	 * This may then safely free the buffer_list (and drop the pages) at
	 * that point, as vm_insert_pages() would've already grabbed the
	 * necessary vma references.
	 */
	rcu_read_lock();
	bl = xa_load(&ctx->io_bl_xa, bgid);
	/* must be a mmap'able buffer ring and have pages */
	ret = false;
	if (bl && bl->is_mmap)
		ret = atomic_inc_not_zero(&bl->refs);
	rcu_read_unlock();

	if (ret)
		return bl;

	return ERR_PTR(-EINVAL);
}

/*
 * Called at or after ->release(), free the mmap'ed buffers that we used
 * for memory mapped provided buffer rings.
 */
void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
{
	struct io_buf_free *ibf;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
		hlist_del(&ibf->list);
		io_mem_free(ibf->mem);
		kfree(ibf);
	}
}