// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

#define BGID_ARRAY	64

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
						   struct io_buffer_list *bl,
						   unsigned int bgid)
{
	if (bl && bgid < BGID_ARRAY)
		return &bl[bgid];

	return xa_load(&ctx->io_bl_xa, bgid);
}

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	smp_store_release(&bl->is_ready, 1);

	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
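/*
 * Editor's sketch, not kernel code: the store-release above pairs with the
 * acquire loads done by the lockless mmap lookup in io_pbuf_get_address()
 * at the bottom of this file. The pattern, with a hypothetical use() on the
 * reader side:
 *
 *	// publisher, under ->uring_lock
 *	bl->bgid = bgid;
 *	smp_store_release(&bl->is_ready, 1);
 *
 *	// lockless reader (the mmap path)
 *	if (!smp_load_acquire(&bl->is_ready))
 *		return NULL;		// setup may still be in flight
 *	use(bl->bgid);			// ordered after the is_ready load
 */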
void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return;
}

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}

static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}
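/*
 * For context, a minimal userspace sketch of the classic provided-buffer
 * flow that feeds io_provided_buffer_select() (assumes liburing and an
 * already initialized 'ring'; bgid 0, eight 4KiB buffers):
 *
 *	char *mem = malloc(8 * 4096);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, mem, 4096, 8, 0, 0);
 *	io_uring_submit(&ring);
 *
 *	// a read that lets the kernel pick one of those buffers
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 0;
 *
 *	// on completion, the chosen BID is in the CQE flags:
 *	//	bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT
 */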
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	/* mmaped buffers are always contig */
	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}
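/*
 * Worked example of the ring indexing above, assuming 4KiB pages:
 * sizeof(struct io_uring_buf) is 16, so IO_BUFFER_LIST_BUF_PER_PAGE is 256.
 * For a masked head of 700 on a pinned (non-mmap) ring:
 *
 *	int off = 700 & (256 - 1);	// 188
 *	int index = 700 / 256;		// 2
 *	// -> descriptor 188 within the third pinned page
 */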
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_mapped)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	int i;

	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
	if (!bl)
		return -ENOMEM;

	for (i = 0; i < BGID_ARRAY; i++) {
		INIT_LIST_HEAD(&bl[i].buf_list);
		bl[i].bgid = i;
	}

	smp_store_release(&ctx->io_bl, bl);
	return 0;
}

static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_mapped) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->is_mmap) {
			folio_put(virt_to_folio(bl->buf_ring));
			bl->buf_ring = NULL;
			bl->is_mmap = 0;
		} else if (bl->buf_nr_pages) {
			int j;

			for (j = 0; j < bl->buf_nr_pages; j++)
				unpin_user_page(bl->buf_pages[j]);
			kvfree(bl->buf_pages);
			bl->buf_pages = NULL;
			bl->buf_nr_pages = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_mapped = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;
	int i;

	for (i = 0; i < BGID_ARRAY; i++) {
		if (!ctx->io_bl)
			break;
		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
	}

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		__io_remove_buffers(ctx, bl, -1U);
		kfree_rcu(bl, rcu);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_mapped)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
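/*
 * Userspace counterpart of the remove path, as a sketch (liburing; note
 * that nbufs travels in sqe->fd and the group ID in sqe->buf_group):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_remove_buffers(sqe, 8, 0);
 *	io_uring_submit(&ring);
 *	// CQE res is the number of buffers removed, or -ENOENT if the
 *	// group doesn't exist, -EINVAL if it is a mapped ring
 */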
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}
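/*
 * Editor's note on the refill arithmetic: on a 64-bit kernel with 4KiB
 * pages, struct io_buffer (two list pointers, addr, len, bid, bgid) comes
 * to 32 bytes, so each allocated page yields PAGE_SIZE / 32 = 128 cache
 * entries. The authoritative count is whatever bufs_in_page computes; 128
 * is just the common configuration.
 */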
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			goto err;
	}

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout. Also can't
			 * be a lower indexed array group, as adding one
			 * where lookup failed cannot happen.
			 */
			if (p->bgid >= BGID_ARRAY)
				kfree_rcu(bl, rcu);
			else
				WARN_ON_ONCE(1);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_mapped) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
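/*
 * For reference, the uapi layout being pinned or allocated below (from
 * <uapi/linux/io_uring.h>): the ring is just an array of 16-byte buffer
 * descriptors, with the tail stored in the reserved area of slot 0:
 *
 *	struct io_uring_buf_ring {
 *		union {
 *			struct {
 *				__u64	resv1;
 *				__u32	resv2;
 *				__u16	resv3;
 *				__u16	tail;
 *			};
 *			__DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
 *		};
 *	};
 */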
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br;
	struct page **pages;
	int i, nr_pages;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Apparently some 32-bit boxes (ARM) will return highmem pages,
	 * which then need to be mapped. We could support that, but it'd
	 * complicate the code and slow down the common cases quite a bit.
	 * So just error out, returning -EINVAL just like we did on kernels
	 * that didn't support mapped buffer rings.
	 */
	for (i = 0; i < nr_pages; i++)
		if (PageHighMem(pages[i]))
			goto error_unpin;

	br = page_address(pages[0]);
#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1))
		goto error_unpin;
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_mapped = 1;
	bl->is_mmap = 0;
	return 0;
error_unpin:
	for (i = 0; i < nr_pages; i++)
		unpin_user_page(pages[i]);
	kvfree(pages);
	return -EINVAL;
}

static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
	size_t ring_size;
	void *ptr;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
	ptr = (void *) __get_free_pages(gfp, get_order(ring_size));
	if (!ptr)
		return -ENOMEM;

	bl->buf_ring = ptr;
	bl->is_mapped = 1;
	bl->is_mmap = 1;
	return 0;
}
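/*
 * With IOU_PBUF_RING_MMAP, userspace maps the ring allocated above instead
 * of providing its own memory. A sketch of the mmap call (offsets are the
 * uapi IORING_OFF_PBUF_* constants; 'ring_fd' and 'bgid' are assumed):
 *
 *	size_t sz = ring_entries * sizeof(struct io_uring_buf);
 *	__u64 off = IORING_OFF_PBUF_RING |
 *		    ((__u64) bgid << IORING_OFF_PBUF_SHIFT);
 *	br = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		  ring_fd, off);
 */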
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
		int ret = io_init_bl_list(ctx);
		if (ret)
			return ret;
	}

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_mapped || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(&reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;
}
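/*
 * Registration from userspace, as a sketch (liburing wraps this opcode in
 * io_uring_register_buf_ring(); entries must be a power of 2 below 65536,
 * and the ring memory must be page aligned when not using
 * IOU_PBUF_RING_MMAP):
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) br,	// from posix_memalign()
 *		.ring_entries	= 8,
 *		.bgid		= 1,
 *	};
 *
 *	ret = io_uring_register_buf_ring(&ring, &reg, 0);
 */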
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_mapped)
		return -EINVAL;

	__io_remove_buffers(ctx, bl, -1U);
	if (bl->bgid >= BGID_ARRAY) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		kfree_rcu(bl, rcu);
	}
	return 0;
}

void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
{
	struct io_buffer_list *bl;

	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);

	if (!bl || !bl->is_mmap)
		return NULL;
	/*
	 * Ensure the list is fully setup. Only strictly needed for RCU lookup
	 * via mmap, and in that case only for the array indexed groups. For
	 * the xarray lookups, it's either visible and ready, or not at all.
	 */
	if (!smp_load_acquire(&bl->is_ready))
		return NULL;

	return bl->buf_ring;
}
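/*
 * Editor's sketch of the userspace side of a mapped buffer ring, to close
 * the loop with io_ring_buffer_select() above (liburing helpers; 'br',
 * 'bufs' and 'ring' are assumed to exist):
 *
 *	int i, mask = io_uring_buf_ring_mask(8);
 *
 *	io_uring_buf_ring_init(br);
 *	for (i = 0; i < 8; i++)
 *		io_uring_buf_ring_add(br, bufs[i], 4096, i, mask, i);
 *	io_uring_buf_ring_advance(br, 8);	// store-release of ->tail
 *
 *	// after handling a CQE for buffer 'bid', hand it back:
 *	io_uring_buf_ring_add(br, bufs[bid], 4096, bid, mask, 0);
 *	io_uring_buf_ring_advance(br, 1);
 */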