// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}

int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	if (req->opcode == IORING_OP_READ_FIXED ||
	    req->opcode == IORING_OP_WRITE_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	return 0;
}
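
/*
 * For illustration only (a liburing sketch, not part of this file): the
 * userspace side of what io_prep_rw() validates for the _FIXED opcodes.
 * The buffer must have been registered up front, and sqe->buf_index must
 * name a registered slot, or io_prep_rw() fails the request with -EFAULT.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	io_uring_register_buffers(&ring, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	// addr/len must fall inside registered buffer slot 0
 *	io_uring_prep_read_fixed(sqe, fd, buf, len, 0, 0);
 */
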
void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	kfree(io->free_iovec);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret);
	}
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
	req->io_task_work.func = io_queue_iowq;
	io_req_task_work_add(req);
}
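
/*
 * An illustrative walk through the cases io_kiocb_update_pos() above
 * handles: an SQE submitted with offset == -1 on a regular file reads
 * from the current file position, and kiocb_done() later writes the
 * updated position back to f_pos (via REQ_F_CUR_POS). A FMODE_STREAM
 * file (socket, pipe) has no position at all, so a NULL ppos is used.
 */
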
#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	if (!req_has_async_data(req))
		return !io_req_prep_async(req);
	iov_iter_restore(&io->s.iter, &io->s.iter_state);
	return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume not safe to re-import and reissue if we're
	 * not in the original thread group (or in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
	return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct super_block *sb = file_inode(req->file)->i_sb;

		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
		sb_end_write(sb);
	}
}

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		kiocb_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (__io_complete_rw_common(req, res))
		return;
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_prio_work_add(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_async_rw *io = req->async_data;
	struct io_rw *rw = io_kiocb_to_cmd(req);

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (ret < 0)
			ret = io->bytes_done;
		else
			ret += io->bytes_done;
	}

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			io_req_set_res(req, req->cqe.res,
				       io_put_kbuf(req, issue_flags));
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req))
			io_req_task_queue_reissue(req);
		else
			io_req_task_queue_fail(req, ret);
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}

static int __io_import_fixed(struct io_kiocb *req, int ddir,
			     struct iov_iter *iter, struct io_mapped_ubuf *imu)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	size_t len = rw->len;
	u64 buf_end, buf_addr = rw->addr;
	size_t offset;

	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be the start of the buffer; set the size appropriately
	 * and advance to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or is the whole
		 * first bvec), just use iov_iter_advance(). This makes it
		 * easier since we can just skip the first segment, which may
		 * not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}

static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
			   unsigned int issue_flags)
{
	if (WARN_ON_ONCE(!req->imu))
		return -EFAULT;
	return __io_import_fixed(req, rw, iter, req->imu);
}
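
/*
 * A worked example of the fast path in __io_import_fixed() above,
 * assuming PAGE_SIZE == 4096 and a registered buffer whose first bvec
 * holds 512 bytes (unaligned start), with an IO beginning 5000 bytes in:
 *
 *	offset = 5000 > bv_len (512), so take the else branch:
 *	offset    -= 512          -> 4488
 *	seg_skip   = 1 + (4488 >> 12) -> 2 (first bvec plus one full page)
 *	iov_offset = 4488 & 4095  -> 392
 *
 * That lands 512 + 4096 + 392 = 5000 bytes into the buffer, as required,
 * without walking any of the skipped segments.
 */
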
#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
				unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;
	void __user *buf;
	size_t len;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	len = clen;
	buf = io_buffer_select(req, &len, issue_flags);
	if (!buf)
		return -ENOBUFS;
	rw->addr = (unsigned long) buf;
	iov[0].iov_base = buf;
	rw->len = iov[0].iov_len = (compat_size_t) len;
	return 0;
}
#endif

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
	void __user *buf;
	ssize_t len;

	if (copy_from_user(iov, uiov, sizeof(*uiov)))
		return -EFAULT;

	len = iov[0].iov_len;
	if (len < 0)
		return -EINVAL;
	buf = io_buffer_select(req, &len, issue_flags);
	if (!buf)
		return -ENOBUFS;
	rw->addr = (unsigned long) buf;
	iov[0].iov_base = buf;
	rw->len = iov[0].iov_len = len;
	return 0;
}
static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				    unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
		iov[0].iov_base = u64_to_user_ptr(rw->addr);
		iov[0].iov_len = rw->len;
		return 0;
	}
	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_compat_import(req, iov, issue_flags);
#endif

	return __io_iov_buffer_select(req, iov, issue_flags);
}

static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
				       struct io_rw_state *s,
				       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct iov_iter *iter = &s->iter;
	u8 opcode = req->opcode;
	struct iovec *iovec;
	void __user *buf;
	size_t sqe_len;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		ret = io_import_fixed(req, ddir, iter, issue_flags);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return ERR_PTR(-ENOBUFS);
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	iovec = s->fast_iov;
	if (req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select(req, iovec, issue_flags);
		if (ret)
			return ERR_PTR(ret);
		iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len);
		return NULL;
	}

	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
			     req->ctx->compat);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);
	return iovec;
}
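
/*
 * A note on the return convention above, for clarity: __io_import_iovec()
 * returns NULL when there is no heap iovec to free (fixed buffers, single
 * ranges, and selected buffers use s->fast_iov or a bvec iter), an
 * ERR_PTR() on failure, and otherwise a kmalloc'ed iovec array (which
 * __import_iovec() allocates once the vector exceeds UIO_FASTIOV entries)
 * that the caller owns and must kfree().
 */
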
static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct iovec **iovec, struct io_rw_state *s,
				  unsigned int issue_flags)
{
	*iovec = __io_import_iovec(rw, req, s, issue_flags);
	if (unlikely(IS_ERR(*iovec)))
		return PTR_ERR(*iovec);

	iov_iter_save_state(&s->iter, &s->iter_state);
	return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			iovec.iov_base = u64_to_user_ptr(rw->addr);
			iovec.iov_len = rw->len;
		}

		if (ddir == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, ppos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, ppos);
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != iovec.iov_len)
			break;
	}

	return ret;
}
static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *io = req->async_data;

	memcpy(&io->s.iter, iter, sizeof(*iter));
	io->free_iovec = iovec;
	io->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		io->s.iter.iov = io->s.fast_iov;
		if (iter->iov != fast_iov) {
			iov_off = iter->iov - fast_iov;
			io->s.iter.iov += iov_off;
		}
		if (io->s.fast_iov != fast_iov)
			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     struct io_rw_state *s, bool force)
{
	if (!force && !io_op_defs[req->opcode].prep_async)
		return 0;
	if (!req_has_async_data(req)) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
	}
	return 0;
}
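
/*
 * A lifecycle sketch for the above, as a concrete example: a readv that
 * would block under IO_URING_F_NONBLOCK reaches io_setup_async_rw() with
 * force == true. Any heap-allocated iovec (more than UIO_FASTIOV
 * segments) moves into io->free_iovec with REQ_F_NEED_CLEANUP set, so
 * ownership passes to the request and io_readv_writev_cleanup() will
 * kfree() it once the async retry finishes.
 */
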
static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov;
	int ret;

	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, READ);
}

int io_writev_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, WRITE);
}

/*
 * This is our waitqueue callback handler, registered through
 * __folio_lock_async() when our initial attempt at the IO armed this
 * waitqueue entry. It gets called when the page is unlocked, and we
 * generally expect that to happen when the page IO is completed and the
 * page is now uptodate. This will queue a task_work based retry of the
 * operation, attempting to copy the data again. If the latter fails
 * because the page was NOT uptodate, then we will do a thread based
 * blocking retry of the operation. That's the unexpected slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}
/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}
static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return call_read_iter(file, &rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_read_all(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!file || !(file->f_mode & mode)))
		return -EBADF;

	if (!io_req_ffs_set(req))
		req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;

	kiocb->ki_flags = iocb_flags(file);
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	if (unlikely(ret))
		return ret;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}
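
/*
 * Illustrative userspace counterpart to the IOPOLL check above, as a
 * liburing sketch (not part of this file): a polled ring only accepts
 * files opened for direct IO on drivers that implement ->iopoll;
 * anything else fails the request with -EOPNOTSUPP.
 *
 *	io_uring_queue_init(64, &ring, IORING_SETUP_IOPOLL);
 *	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
 */
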
int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_async_rw *io;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		io = req->async_data;
		s = &io->s;

		/*
		 * Safe and required to re-import if we're using provided
		 * buffers, as we dropped the selected one before retry.
		 */
		if (io_do_buffer_select(req)) {
			ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
			if (unlikely(ret < 0))
				return ret;
		}

		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_READ);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req))) {
			ret = io_setup_async_rw(req, iovec, s, true);
			return ret ?: -EAGAIN;
		}
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(rw, &s->iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* if we can poll, just do that */
		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		if (iovec)
			kfree(iovec);
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&s->iter, &s->iter_state);

	ret2 = io_setup_async_rw(req, iovec, s, true);
	if (ret2)
		return ret2;

	iovec = NULL;
	io = req->async_data;
	s = &io->s;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&s->iter, ret);
		if (!iov_iter_count(&s->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&s->iter, &s->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		/*
		 * Now retry the read with the IOCB_WAITQ parts set in the
		 * iocb. If we get -EIOCBQUEUED, then we'll get a notification
		 * when the desired page gets unlocked. We can also get a
		 * partial read here, and if we do, then just retry at the new
		 * offset.
		 */
		ret = io_iter_do_read(rw, &s->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&s->iter, &s->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than to delegate to kfree */
	if (iovec)
		kfree(iovec);
	return kiocb_done(req, ret, issue_flags);
}
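
/*
 * A concrete example of the io_read() retry loop above: a 16KB buffered
 * read that returns 4KB and then blocks leaves io->bytes_done == 4096,
 * the iter advanced past the copied bytes, and the request re-armed via
 * IOCB_WAITQ. When the retry eventually completes the remaining 12KB,
 * kiocb_done() folds bytes_done back in, so the CQE reports the full
 * 16384 bytes rather than just the final attempt.
 */
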
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		struct io_async_rw *io = req->async_data;

		s = &io->s;
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto copy_iov;

		/* File path doesn't support NOWAIT for non-direct IO */
		if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
		    (req->flags & REQ_F_ISREG))
			goto copy_iov;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw(). Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
				     SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto copy_iov;
done:
		ret = kiocb_done(req, ret2, issue_flags);
	} else {
copy_iov:
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
		return ret ?: -EAGAIN;
	}
	/* it's reportedly faster than delegating the NULL check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}
static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
	io_commit_cqring_flush(ctx);
	if (ctx->flags & IORING_SETUP_SQPOLL)
		io_cqring_wake(ctx);
}
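
/*
 * Roughly, io_do_iopoll() below works in two passes: a poll pass that
 * drives ->iopoll() on the head of ctx->iopoll_list until something
 * completes, then a reap pass that walks the completed prefix of the
 * list in submission order and posts CQEs. The smp_load_acquire() on
 * ->iopoll_completed pairs with the smp_store_release() in
 * io_complete_rw_iopoll(), so the result is visible before reaping.
 */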
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = BLK_POLL_NOSLEEP;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct io_rw *rw = io_kiocb_to_cmd(req);
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		if (unlikely(req->flags & REQ_F_CQE_SKIP))
			continue;

		req->cqe.flags = io_put_kbuf(req, 0);
		__io_fill_cqe_req(req->ctx, req);
	}

	if (unlikely(!nr_events))
		return 0;

	io_commit_cqring(ctx);
	io_cqring_ev_posted_iopoll(ctx);
	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);
	io_free_batch_list(ctx, pos);
	return nr_events;
}