xref: /openbmc/linux/io_uring/rw.c (revision 360823a09426347ea8f232b0b0b5156d0aed0302)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/blk-mq.h>
7 #include <linux/mm.h>
8 #include <linux/slab.h>
9 #include <linux/fsnotify.h>
10 #include <linux/poll.h>
11 #include <linux/nospec.h>
12 #include <linux/compat.h>
13 #include <linux/io_uring.h>
14 
15 #include <uapi/linux/io_uring.h>
16 
17 #include "io_uring.h"
18 #include "opdef.h"
19 #include "kbuf.h"
20 #include "rsrc.h"
21 #include "rw.h"
22 
23 struct io_rw {
24 	/* NOTE: kiocb has the file as the first member, so don't do it here */
25 	struct kiocb			kiocb;
26 	u64				addr;
27 	u32				len;
28 	rwf_t				flags;
29 };
30 
31 static inline bool io_file_supports_nowait(struct io_kiocb *req)
32 {
33 	return req->flags & REQ_F_SUPPORT_NOWAIT;
34 }
35 
36 #ifdef CONFIG_COMPAT
37 static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
38 {
39 	struct compat_iovec __user *uiov;
40 	compat_ssize_t clen;
41 
42 	uiov = u64_to_user_ptr(rw->addr);
43 	if (!access_ok(uiov, sizeof(*uiov)))
44 		return -EFAULT;
45 	if (__get_user(clen, &uiov->iov_len))
46 		return -EFAULT;
47 	if (clen < 0)
48 		return -EINVAL;
49 
50 	rw->len = clen;
51 	return 0;
52 }
53 #endif
54 
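/*
 * For a buffer-select readv the SQE must describe exactly one iovec. Pull
 * the length out of that single user iovec so buffer selection knows how
 * much to provide; the buffer address itself is filled in at selection time.
 */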
55 static int io_iov_buffer_select_prep(struct io_kiocb *req)
56 {
57 	struct iovec __user *uiov;
58 	struct iovec iov;
59 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
60 
61 	if (rw->len != 1)
62 		return -EINVAL;
63 
64 #ifdef CONFIG_COMPAT
65 	if (req->ctx->compat)
66 		return io_iov_compat_buffer_select_prep(rw);
67 #endif
68 
69 	uiov = u64_to_user_ptr(rw->addr);
70 	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
71 		return -EFAULT;
72 	rw->len = iov.iov_len;
73 	return 0;
74 }
75 
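/*
 * Common SQE preparation for the read/write opcodes: offset, buffer index
 * (including the registered-buffer lookup for the FIXED variants), ioprio,
 * address, length and rwf flags. Buffer-select READV is validated here too,
 * since rw->len may change once a buffer has been picked.
 */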
76 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
77 {
78 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
79 	unsigned ioprio;
80 	int ret;
81 
82 	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
83 	/* used for fixed read/write too - just read unconditionally */
84 	req->buf_index = READ_ONCE(sqe->buf_index);
85 
86 	if (req->opcode == IORING_OP_READ_FIXED ||
87 	    req->opcode == IORING_OP_WRITE_FIXED) {
88 		struct io_ring_ctx *ctx = req->ctx;
89 		u16 index;
90 
91 		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
92 			return -EFAULT;
93 		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
94 		req->imu = ctx->user_bufs[index];
95 		io_req_set_rsrc_node(req, ctx, 0);
96 	}
97 
98 	ioprio = READ_ONCE(sqe->ioprio);
99 	if (ioprio) {
100 		ret = ioprio_check_cap(ioprio);
101 		if (ret)
102 			return ret;
103 
104 		rw->kiocb.ki_ioprio = ioprio;
105 	} else {
106 		rw->kiocb.ki_ioprio = get_current_ioprio();
107 	}
108 	rw->kiocb.dio_complete = NULL;
109 
110 	rw->addr = READ_ONCE(sqe->addr);
111 	rw->len = READ_ONCE(sqe->len);
112 	rw->flags = READ_ONCE(sqe->rw_flags);
113 
114 	/* Have to do this validation here, as by the time we are in io_read()
115 	 * rw->len might have changed due to buffer selection.
116 	 */
117 	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
118 		ret = io_iov_buffer_select_prep(req);
119 		if (ret)
120 			return ret;
121 	}
122 
123 	return 0;
124 }
125 
126 void io_readv_writev_cleanup(struct io_kiocb *req)
127 {
128 	struct io_async_rw *io = req->async_data;
129 
130 	kfree(io->free_iovec);
131 }
132 
133 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
134 {
135 	switch (ret) {
136 	case -EIOCBQUEUED:
137 		break;
138 	case -ERESTARTSYS:
139 	case -ERESTARTNOINTR:
140 	case -ERESTARTNOHAND:
141 	case -ERESTART_RESTARTBLOCK:
142 		/*
143 		 * We can't just restart the syscall, since previously
144 		 * submitted sqes may already be in progress. Just fail this
145 		 * IO with EINTR.
146 		 */
147 		ret = -EINTR;
148 		fallthrough;
149 	default:
150 		kiocb->ki_complete(kiocb, ret);
151 	}
152 }
153 
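/*
 * Decide which file position to use: an explicit offset from the SQE, the
 * file's current position (-1 means "use and update f_pos" on non-stream
 * files), or NULL for stream files that have no position at all.
 */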
154 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
155 {
156 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
157 
158 	if (rw->kiocb.ki_pos != -1)
159 		return &rw->kiocb.ki_pos;
160 
161 	if (!(req->file->f_mode & FMODE_STREAM)) {
162 		req->flags |= REQ_F_CUR_POS;
163 		rw->kiocb.ki_pos = req->file->f_pos;
164 		return &rw->kiocb.ki_pos;
165 	}
166 
167 	rw->kiocb.ki_pos = 0;
168 	return NULL;
169 }
170 
171 #ifdef CONFIG_BLOCK
172 static bool io_resubmit_prep(struct io_kiocb *req)
173 {
174 	struct io_async_rw *io = req->async_data;
175 
176 	if (!req_has_async_data(req))
177 		return !io_req_prep_async(req);
178 	iov_iter_restore(&io->s.iter, &io->s.iter_state);
179 	return true;
180 }
181 
182 static bool io_rw_should_reissue(struct io_kiocb *req)
183 {
184 	umode_t mode = file_inode(req->file)->i_mode;
185 	struct io_ring_ctx *ctx = req->ctx;
186 
187 	if (!S_ISBLK(mode) && !S_ISREG(mode))
188 		return false;
189 	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
190 	    !(ctx->flags & IORING_SETUP_IOPOLL)))
191 		return false;
192 	/*
193 	 * If ref is dying, we might be running poll reap from the exit work.
194 	 * Don't attempt to reissue from that path, just let it fail with
195 	 * -EAGAIN.
196 	 */
197 	if (percpu_ref_is_dying(&ctx->refs))
198 		return false;
199 	/*
200 	 * Play it safe and assume it's not safe to re-import and reissue if
201 	 * we're not in the original thread group (or not running in task context).
202 	 */
203 	if (!same_thread_group(req->task, current) || !in_task())
204 		return false;
205 	return true;
206 }
207 #else
208 static bool io_resubmit_prep(struct io_kiocb *req)
209 {
210 	return false;
211 }
212 static bool io_rw_should_reissue(struct io_kiocb *req)
213 {
214 	return false;
215 }
216 #endif
217 
218 static void io_req_end_write(struct io_kiocb *req)
219 {
220 	if (req->flags & REQ_F_ISREG) {
221 		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
222 
223 		kiocb_end_write(&rw->kiocb);
224 	}
225 }
226 
227 /*
228  * Trigger the notifications after having done some IO, and finish the write
229  * accounting, if any.
230  */
231 static void io_req_io_end(struct io_kiocb *req)
232 {
233 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
234 
235 	if (rw->kiocb.ki_flags & IOCB_WRITE) {
236 		io_req_end_write(req);
237 		fsnotify_modify(req->file);
238 	} else {
239 		fsnotify_access(req->file);
240 	}
241 }
242 
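/*
 * Common completion handling: if the result doesn't match what was asked
 * for, either flag the request for reissue (-EAGAIN/-EOPNOTSUPP when a
 * reissue is safe, returning true) or mark the request as failed.
 */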
243 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
244 {
245 	if (unlikely(res != req->cqe.res)) {
246 		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
247 		    io_rw_should_reissue(req)) {
248 			/*
249 			 * Reissue will start accounting again, finish the
250 			 * current cycle.
251 			 */
252 			io_req_io_end(req);
253 			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
254 			return true;
255 		}
256 		req_set_fail(req);
257 		req->cqe.res = res;
258 	}
259 	return false;
260 }
261 
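/*
 * Fold in bytes completed by earlier partial attempts of this request, so
 * the posted result reflects the total amount of IO done.
 */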
262 static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
263 {
264 	struct io_async_rw *io = req->async_data;
265 
266 	/* add previously done IO, if any */
267 	if (req_has_async_data(req) && io->bytes_done > 0) {
268 		if (res < 0)
269 			res = io->bytes_done;
270 		else
271 			res += io->bytes_done;
272 	}
273 	return res;
274 }
275 
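/*
 * task_work completion handler: pick up a deferred dio completion result if
 * one is pending, finish write accounting and fsnotify, drop any selected
 * buffer, and post the CQE.
 */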
276 void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
277 {
278 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
279 	struct kiocb *kiocb = &rw->kiocb;
280 
281 	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
282 		long res = kiocb->dio_complete(rw->kiocb.private);
283 
284 		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
285 	}
286 
287 	io_req_io_end(req);
288 
289 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
290 		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
291 
292 		req->cqe.flags |= io_put_kbuf(req, issue_flags);
293 	}
294 	io_req_task_complete(req, ts);
295 }
296 
297 static void io_complete_rw(struct kiocb *kiocb, long res)
298 {
299 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
300 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
301 
302 	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
303 		if (__io_complete_rw_common(req, res))
304 			return;
305 		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
306 	}
307 	req->io_task_work.func = io_req_rw_complete;
308 	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
309 }
310 
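/*
 * Completion for IOPOLL requests: no CQE is posted from here, we only record
 * the result and mark the request completed so io_do_iopoll() can reap it.
 */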
311 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
312 {
313 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
314 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
315 
316 	if (kiocb->ki_flags & IOCB_WRITE)
317 		io_req_end_write(req);
318 	if (unlikely(res != req->cqe.res)) {
319 		if (res == -EAGAIN && io_rw_should_reissue(req)) {
320 			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
321 			return;
322 		}
323 		req->cqe.res = res;
324 	}
325 
326 	/* order with io_iopoll_complete() checking ->iopoll_completed */
327 	smp_store_release(&req->iopoll_completed, 1);
328 }
329 
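/*
 * Finish a request that completed inline from the submission path: update
 * f_pos if needed, post the result (or hand an error to ->ki_complete()),
 * and deal with a pending reissue.
 */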
330 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
331 		       unsigned int issue_flags)
332 {
333 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
334 	unsigned final_ret = io_fixup_rw_res(req, ret);
335 
336 	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
337 		req->file->f_pos = rw->kiocb.ki_pos;
338 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
339 		if (!__io_complete_rw_common(req, ret)) {
340 			/*
341 			 * Safe to call io_end from here as we're inline
342 			 * from the submission path.
343 			 */
344 			io_req_io_end(req);
345 			io_req_set_res(req, final_ret,
346 				       io_put_kbuf(req, issue_flags));
347 			return IOU_OK;
348 		}
349 	} else {
350 		io_rw_done(&rw->kiocb, ret);
351 	}
352 
353 	if (req->flags & REQ_F_REISSUE) {
354 		req->flags &= ~REQ_F_REISSUE;
355 		if (io_resubmit_prep(req))
356 			return -EAGAIN;
357 		else
358 			io_req_task_queue_fail(req, final_ret);
359 	}
360 	return IOU_ISSUE_SKIP_COMPLETE;
361 }
362 
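/*
 * Map the buffer(s) described by the SQE into the iov_iter: a registered
 * (fixed) buffer, a single user address (plain READ/WRITE or a selected
 * buffer), or a user iovec array. Returns an allocated iovec array that the
 * caller must free, NULL if none was needed, or an ERR_PTR on failure.
 */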
363 static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
364 				       struct io_rw_state *s,
365 				       unsigned int issue_flags)
366 {
367 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
368 	struct iov_iter *iter = &s->iter;
369 	u8 opcode = req->opcode;
370 	struct iovec *iovec;
371 	void __user *buf;
372 	size_t sqe_len;
373 	ssize_t ret;
374 
375 	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
376 		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
377 		if (ret)
378 			return ERR_PTR(ret);
379 		return NULL;
380 	}
381 
382 	buf = u64_to_user_ptr(rw->addr);
383 	sqe_len = rw->len;
384 
385 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
386 	    (req->flags & REQ_F_BUFFER_SELECT)) {
387 		if (io_do_buffer_select(req)) {
388 			buf = io_buffer_select(req, &sqe_len, issue_flags);
389 			if (!buf)
390 				return ERR_PTR(-ENOBUFS);
391 			rw->addr = (unsigned long) buf;
392 			rw->len = sqe_len;
393 		}
394 
395 		ret = import_ubuf(ddir, buf, sqe_len, iter);
396 		if (ret)
397 			return ERR_PTR(ret);
398 		return NULL;
399 	}
400 
401 	iovec = s->fast_iov;
402 	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
403 			      req->ctx->compat);
404 	if (unlikely(ret < 0))
405 		return ERR_PTR(ret);
406 	return iovec;
407 }
408 
409 static inline int io_import_iovec(int rw, struct io_kiocb *req,
410 				  struct iovec **iovec, struct io_rw_state *s,
411 				  unsigned int issue_flags)
412 {
413 	*iovec = __io_import_iovec(rw, req, s, issue_flags);
414 	if (IS_ERR(*iovec))
415 		return PTR_ERR(*iovec);
416 
417 	iov_iter_save_state(&s->iter, &s->iter_state);
418 	return 0;
419 }
420 
421 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
422 {
423 	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
424 }
425 
426 /*
427  * For files that don't have ->read_iter() and ->write_iter(), handle them
428  * by looping over ->read() or ->write() manually.
429  */
430 static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
431 {
432 	struct kiocb *kiocb = &rw->kiocb;
433 	struct file *file = kiocb->ki_filp;
434 	ssize_t ret = 0;
435 	loff_t *ppos;
436 
437 	/*
438 	 * Don't support polled IO through this interface, and we can't
439 	 * support non-blocking either. For the latter, this just causes
440 	 * the kiocb to be handled from an async context.
441 	 */
442 	if (kiocb->ki_flags & IOCB_HIPRI)
443 		return -EOPNOTSUPP;
444 	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
445 	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
446 		return -EAGAIN;
447 
448 	ppos = io_kiocb_ppos(kiocb);
449 
450 	while (iov_iter_count(iter)) {
451 		void __user *addr;
452 		size_t len;
453 		ssize_t nr;
454 
455 		if (iter_is_ubuf(iter)) {
456 			addr = iter->ubuf + iter->iov_offset;
457 			len = iov_iter_count(iter);
458 		} else if (!iov_iter_is_bvec(iter)) {
459 			addr = iter_iov_addr(iter);
460 			len = iter_iov_len(iter);
461 		} else {
462 			addr = u64_to_user_ptr(rw->addr);
463 			len = rw->len;
464 		}
465 
466 		if (ddir == READ)
467 			nr = file->f_op->read(file, addr, len, ppos);
468 		else
469 			nr = file->f_op->write(file, addr, len, ppos);
470 
471 		if (nr < 0) {
472 			if (!ret)
473 				ret = nr;
474 			break;
475 		}
476 		ret += nr;
477 		if (!iov_iter_is_bvec(iter)) {
478 			iov_iter_advance(iter, nr);
479 		} else {
480 			rw->addr += nr;
481 			rw->len -= nr;
482 			if (!rw->len)
483 				break;
484 		}
485 		if (nr != len)
486 			break;
487 	}
488 
489 	return ret;
490 }
491 
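/*
 * Stash the iterator state in the request's async data so the IO can be
 * retried from a different context. Inline iovecs are copied into the
 * request's own fast_iov storage; a heap-allocated iovec is handed over and
 * freed later via io_readv_writev_cleanup().
 */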
492 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
493 			  const struct iovec *fast_iov, struct iov_iter *iter)
494 {
495 	struct io_async_rw *io = req->async_data;
496 
497 	memcpy(&io->s.iter, iter, sizeof(*iter));
498 	io->free_iovec = iovec;
499 	io->bytes_done = 0;
500 	/* bvec (fixed buffers) and ubuf iterators have no iovec array to copy */
501 	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
502 		return;
503 	if (!iovec) {
504 		unsigned iov_off = 0;
505 
506 		io->s.iter.__iov = io->s.fast_iov;
507 		if (iter->__iov != fast_iov) {
508 			iov_off = iter_iov(iter) - fast_iov;
509 			io->s.iter.__iov += iov_off;
510 		}
511 		if (io->s.fast_iov != fast_iov)
512 			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
513 			       sizeof(struct iovec) * iter->nr_segs);
514 	} else {
515 		req->flags |= REQ_F_NEED_CLEANUP;
516 	}
517 }
518 
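/*
 * Allocate async data, if the opcode needs it or the caller forces it, and
 * snapshot the iterator so the request can be retried later without having
 * to re-import the buffers from userspace.
 */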
519 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
520 			     struct io_rw_state *s, bool force)
521 {
522 	if (!force && !io_cold_defs[req->opcode].prep_async)
523 		return 0;
524 	if (!req_has_async_data(req)) {
525 		struct io_async_rw *iorw;
526 
527 		if (io_alloc_async_data(req)) {
528 			kfree(iovec);
529 			return -ENOMEM;
530 		}
531 
532 		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
533 		iorw = req->async_data;
534 		/* we've copied and mapped the iter, ensure state is saved */
535 		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
536 	}
537 	return 0;
538 }
539 
540 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
541 {
542 	struct io_async_rw *iorw = req->async_data;
543 	struct iovec *iov;
544 	int ret;
545 
546 	iorw->bytes_done = 0;
547 	iorw->free_iovec = NULL;
548 
549 	/* submission path, ->uring_lock should already be taken */
550 	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
551 	if (unlikely(ret < 0))
552 		return ret;
553 
554 	if (iov) {
555 		iorw->free_iovec = iov;
556 		req->flags |= REQ_F_NEED_CLEANUP;
557 	}
558 
559 	return 0;
560 }
561 
562 int io_readv_prep_async(struct io_kiocb *req)
563 {
564 	return io_rw_prep_async(req, ITER_DEST);
565 }
566 
567 int io_writev_prep_async(struct io_kiocb *req)
568 {
569 	return io_rw_prep_async(req, ITER_SOURCE);
570 }
571 
572 /*
573  * This is our waitqueue callback handler, registered through __folio_lock_async()
574  * when we initially tried to do the IO with the iocb and armed our waitqueue.
575  * This gets called when the page is unlocked, and we generally expect that to
576  * happen when the page IO is completed and the page is now uptodate. This will
577  * queue a task_work based retry of the operation, attempting to copy the data
578  * again. If the latter fails because the page was NOT uptodate, then we will
579  * do a thread based blocking retry of the operation. That's the unexpected
580  * slow path.
581  */
582 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
583 			     int sync, void *arg)
584 {
585 	struct wait_page_queue *wpq;
586 	struct io_kiocb *req = wait->private;
587 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
588 	struct wait_page_key *key = arg;
589 
590 	wpq = container_of(wait, struct wait_page_queue, wait);
591 
592 	if (!wake_page_match(wpq, key))
593 		return 0;
594 
595 	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
596 	list_del_init(&wait->entry);
597 	io_req_task_queue(req);
598 	return 1;
599 }
600 
601 /*
602  * This controls whether a given IO request should be armed for async page
603  * based retry. If we return false here, the request is handed to the async
604  * worker threads for retry. If we're doing buffered reads on a regular file,
605  * we prepare a private wait_page_queue entry and retry the operation. This
606  * will either succeed because the page is now uptodate and unlocked, or it
607  * will register a callback when the page is unlocked at IO completion. Through
608  * that callback, io_uring uses task_work to setup a retry of the operation.
609  * That retry will attempt the buffered read again. The retry will generally
610  * succeed, or in rare cases where it fails, we then fall back to using the
611  * async worker threads for a blocking retry.
612  */
613 static bool io_rw_should_retry(struct io_kiocb *req)
614 {
615 	struct io_async_rw *io = req->async_data;
616 	struct wait_page_queue *wait = &io->wpq;
617 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
618 	struct kiocb *kiocb = &rw->kiocb;
619 
620 	/* never retry for NOWAIT, we just complete with -EAGAIN */
621 	if (req->flags & REQ_F_NOWAIT)
622 		return false;
623 
624 	/* Only for buffered IO */
625 	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
626 		return false;
627 
628 	/*
629 	 * just use poll if we can, and don't attempt if the fs doesn't
630 	 * support callback based unlocks
631 	 */
632 	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
633 		return false;
634 
635 	wait->wait.func = io_async_buf_func;
636 	wait->wait.private = req;
637 	wait->wait.flags = 0;
638 	INIT_LIST_HEAD(&wait->wait.entry);
639 	kiocb->ki_flags |= IOCB_WAITQ;
640 	kiocb->ki_flags &= ~IOCB_NOWAIT;
641 	kiocb->ki_waitq = wait;
642 	return true;
643 }
644 
645 static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
646 {
647 	struct file *file = rw->kiocb.ki_filp;
648 
649 	if (likely(file->f_op->read_iter))
650 		return call_read_iter(file, &rw->kiocb, iter);
651 	else if (file->f_op->read)
652 		return loop_rw_iter(READ, rw, iter);
653 	else
654 		return -EINVAL;
655 }
656 
657 static bool need_complete_io(struct io_kiocb *req)
658 {
659 	return req->flags & REQ_F_ISREG ||
660 		S_ISBLK(file_inode(req->file)->i_mode);
661 }
662 
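/*
 * Per-issue kiocb setup: validate that the file allows this access mode,
 * apply the RWF_* flags, decide whether the request must not block, and pick
 * the completion handler (IOPOLL vs regular).
 */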
663 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
664 {
665 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
666 	struct kiocb *kiocb = &rw->kiocb;
667 	struct io_ring_ctx *ctx = req->ctx;
668 	struct file *file = req->file;
669 	int ret;
670 
671 	if (unlikely(!file || !(file->f_mode & mode)))
672 		return -EBADF;
673 
674 	if (!(req->flags & REQ_F_FIXED_FILE))
675 		req->flags |= io_file_get_flags(file);
676 
677 	kiocb->ki_flags = file->f_iocb_flags;
678 	ret = kiocb_set_rw_flags(kiocb, rw->flags);
679 	if (unlikely(ret))
680 		return ret;
681 	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
682 
683 	/*
684 	 * If the file is marked O_NONBLOCK, still allow retry for it if it
685 	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
686 	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
687 	 */
688 	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
689 	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
690 		req->flags |= REQ_F_NOWAIT;
691 
692 	if (ctx->flags & IORING_SETUP_IOPOLL) {
693 		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
694 			return -EOPNOTSUPP;
695 
696 		kiocb->private = NULL;
697 		kiocb->ki_flags |= IOCB_HIPRI;
698 		kiocb->ki_complete = io_complete_rw_iopoll;
699 		req->iopoll_completed = 0;
700 	} else {
701 		if (kiocb->ki_flags & IOCB_HIPRI)
702 			return -EINVAL;
703 		kiocb->ki_complete = io_complete_rw;
704 	}
705 
706 	return 0;
707 }
708 
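/*
 * Issue a read. Nonblocking attempts that can't make progress are punted to
 * async context; short buffered reads are retried inline via the IOCB_WAITQ
 * page-unlock callback where possible, with bytes_done accumulating what has
 * been read across attempts.
 */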
709 static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
710 {
711 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
712 	struct io_rw_state __s, *s = &__s;
713 	struct iovec *iovec;
714 	struct kiocb *kiocb = &rw->kiocb;
715 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
716 	struct io_async_rw *io;
717 	ssize_t ret, ret2;
718 	loff_t *ppos;
719 
720 	if (!req_has_async_data(req)) {
721 		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
722 		if (unlikely(ret < 0))
723 			return ret;
724 	} else {
725 		io = req->async_data;
726 		s = &io->s;
727 
728 		/*
729 		 * Safe and required to re-import if we're using provided
730 		 * buffers, as we dropped the selected one before retry.
731 		 */
732 		if (io_do_buffer_select(req)) {
733 			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
734 			if (unlikely(ret < 0))
735 				return ret;
736 		}
737 
738 		/*
739 		 * We come here from an earlier attempt, restore our state to
740 		 * match in case it doesn't. It's cheap enough that we don't
741 		 * need to make this conditional.
742 		 */
743 		iov_iter_restore(&s->iter, &s->iter_state);
744 		iovec = NULL;
745 	}
746 	ret = io_rw_init_file(req, FMODE_READ);
747 	if (unlikely(ret)) {
748 		kfree(iovec);
749 		return ret;
750 	}
751 	req->cqe.res = iov_iter_count(&s->iter);
752 
753 	if (force_nonblock) {
754 		/* If the file doesn't support async, just async punt */
755 		if (unlikely(!io_file_supports_nowait(req))) {
756 			ret = io_setup_async_rw(req, iovec, s, true);
757 			return ret ?: -EAGAIN;
758 		}
759 		kiocb->ki_flags |= IOCB_NOWAIT;
760 	} else {
761 		/* Ensure we clear previously set non-block flag */
762 		kiocb->ki_flags &= ~IOCB_NOWAIT;
763 	}
764 
765 	ppos = io_kiocb_update_pos(req);
766 
767 	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
768 	if (unlikely(ret)) {
769 		kfree(iovec);
770 		return ret;
771 	}
772 
773 	ret = io_iter_do_read(rw, &s->iter);
774 
775 	/*
776 	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
777 	 * issue, even though they should be returning -EAGAIN. To be safe,
778 	 * retry from blocking context for either.
779 	 */
780 	if (ret == -EOPNOTSUPP && force_nonblock)
781 		ret = -EAGAIN;
782 
783 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
784 		req->flags &= ~REQ_F_REISSUE;
785 		/* if we can poll, just do that */
786 		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
787 			return -EAGAIN;
788 		/* IOPOLL retry should happen for io-wq threads */
789 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
790 			goto done;
791 		/* no retry on NONBLOCK nor RWF_NOWAIT */
792 		if (req->flags & REQ_F_NOWAIT)
793 			goto done;
794 		ret = 0;
795 	} else if (ret == -EIOCBQUEUED) {
796 		req->flags |= REQ_F_PARTIAL_IO;
797 		io_kbuf_recycle(req, issue_flags);
798 		if (iovec)
799 			kfree(iovec);
800 		return IOU_ISSUE_SKIP_COMPLETE;
801 	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
802 		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
803 		/* read all, failed, already did sync or don't want to retry */
804 		goto done;
805 	}
806 
807 	/*
808 	 * Don't depend on the iter state matching what was consumed, or being
809 	 * untouched in case of error. Restore it and we'll advance it
810 	 * manually if we need to.
811 	 */
812 	iov_iter_restore(&s->iter, &s->iter_state);
813 
814 	ret2 = io_setup_async_rw(req, iovec, s, true);
815 	iovec = NULL;
816 	if (ret2) {
817 		ret = ret > 0 ? ret : ret2;
818 		goto done;
819 	}
820 
821 	req->flags |= REQ_F_PARTIAL_IO;
822 	io_kbuf_recycle(req, issue_flags);
823 
824 	io = req->async_data;
825 	s = &io->s;
826 	/*
827 	 * Now use our persistent iterator and state, if we aren't already.
828 	 * We've restored and mapped the iter to match.
829 	 */
830 
831 	do {
832 		/*
833 		 * We end up here because of a partial read, either from
834 		 * above or inside this loop. Advance the iter by the bytes
835 		 * that were consumed.
836 		 */
837 		iov_iter_advance(&s->iter, ret);
838 		if (!iov_iter_count(&s->iter))
839 			break;
840 		io->bytes_done += ret;
841 		iov_iter_save_state(&s->iter, &s->iter_state);
842 
843 		/* if we can retry, do so with the callbacks armed */
844 		if (!io_rw_should_retry(req)) {
845 			kiocb->ki_flags &= ~IOCB_WAITQ;
846 			return -EAGAIN;
847 		}
848 
849 		req->cqe.res = iov_iter_count(&s->iter);
850 		/*
851 		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
852 		 * we get -EIOCBQUEUED, then we'll get a notification when the
853 		 * desired page gets unlocked. We can also get a partial read
854 		 * here, and if we do, then just retry at the new offset.
855 		 */
856 		ret = io_iter_do_read(rw, &s->iter);
857 		if (ret == -EIOCBQUEUED)
858 			return IOU_ISSUE_SKIP_COMPLETE;
859 		/* we got some bytes, but not all. retry. */
860 		kiocb->ki_flags &= ~IOCB_WAITQ;
861 		iov_iter_restore(&s->iter, &s->iter_state);
862 	} while (ret > 0);
863 done:
864 	/* it's faster to check here than to delegate to kfree */
865 	if (iovec)
866 		kfree(iovec);
867 	return ret;
868 }
869 
870 int io_read(struct io_kiocb *req, unsigned int issue_flags)
871 {
872 	int ret;
873 
874 	ret = __io_read(req, issue_flags);
875 	if (ret >= 0)
876 		return kiocb_done(req, ret, issue_flags);
877 
878 	return ret;
879 }
880 
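/*
 * Like kiocb_start_write(), but for NOWAIT requests only trylock the
 * superblock write (freeze) protection, so the caller can return -EAGAIN
 * instead of blocking on a frozen filesystem.
 */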
881 static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
882 {
883 	struct inode *inode;
884 	bool ret;
885 
886 	if (!(req->flags & REQ_F_ISREG))
887 		return true;
888 	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
889 		kiocb_start_write(kiocb);
890 		return true;
891 	}
892 
893 	inode = file_inode(kiocb->ki_filp);
894 	ret = sb_start_write_trylock(inode->i_sb);
895 	if (ret)
896 		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
897 	return ret;
898 }
899 
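/*
 * Issue a write. Nonblocking attempts that would have to block are punted to
 * async context via copy_iov; a short write on a regular file or block device
 * is finished from the worker, with bytes_done carrying what was already
 * written.
 */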
900 int io_write(struct io_kiocb *req, unsigned int issue_flags)
901 {
902 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
903 	struct io_rw_state __s, *s = &__s;
904 	struct iovec *iovec;
905 	struct kiocb *kiocb = &rw->kiocb;
906 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
907 	ssize_t ret, ret2;
908 	loff_t *ppos;
909 
910 	if (!req_has_async_data(req)) {
911 		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
912 		if (unlikely(ret < 0))
913 			return ret;
914 	} else {
915 		struct io_async_rw *io = req->async_data;
916 
917 		s = &io->s;
918 		iov_iter_restore(&s->iter, &s->iter_state);
919 		iovec = NULL;
920 	}
921 	ret = io_rw_init_file(req, FMODE_WRITE);
922 	if (unlikely(ret)) {
923 		kfree(iovec);
924 		return ret;
925 	}
926 	req->cqe.res = iov_iter_count(&s->iter);
927 
928 	if (force_nonblock) {
929 		/* If the file doesn't support async, just async punt */
930 		if (unlikely(!io_file_supports_nowait(req)))
931 			goto copy_iov;
932 
933 		/* For non-direct IO, the file path only supports NOWAIT on block devices or files with FMODE_BUF_WASYNC. */
934 		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
935 			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
936 			(req->flags & REQ_F_ISREG))
937 			goto copy_iov;
938 
939 		kiocb->ki_flags |= IOCB_NOWAIT;
940 	} else {
941 		/* Ensure we clear previously set non-block flag */
942 		kiocb->ki_flags &= ~IOCB_NOWAIT;
943 	}
944 
945 	ppos = io_kiocb_update_pos(req);
946 
947 	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
948 	if (unlikely(ret)) {
949 		kfree(iovec);
950 		return ret;
951 	}
952 
953 	if (unlikely(!io_kiocb_start_write(req, kiocb)))
954 		return -EAGAIN;
955 	kiocb->ki_flags |= IOCB_WRITE;
956 
957 	if (likely(req->file->f_op->write_iter))
958 		ret2 = call_write_iter(req->file, kiocb, &s->iter);
959 	else if (req->file->f_op->write)
960 		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
961 	else
962 		ret2 = -EINVAL;
963 
964 	if (ret2 == -EIOCBQUEUED) {
965 		req->flags |= REQ_F_PARTIAL_IO;
966 		io_kbuf_recycle(req, issue_flags);
967 	}
968 
969 	if (req->flags & REQ_F_REISSUE) {
970 		req->flags &= ~REQ_F_REISSUE;
971 		ret2 = -EAGAIN;
972 	}
973 
974 	/*
975 	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
976 	 * retry them without IOCB_NOWAIT.
977 	 */
978 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
979 		ret2 = -EAGAIN;
980 	/* no retry on NONBLOCK nor RWF_NOWAIT */
981 	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
982 		goto done;
983 	if (!force_nonblock || ret2 != -EAGAIN) {
984 		/* IOPOLL retry should happen for io-wq threads */
985 		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
986 			goto copy_iov;
987 
988 		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
989 			struct io_async_rw *io;
990 
991 			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
992 						req->cqe.res, ret2);
993 
994 			/* This is a partial write. The file pos has already been
995 			 * updated, so set up the async struct to complete the request
996 			 * in the worker. Also update bytes_done to account for
997 			 * the bytes already written.
998 			 */
999 			iov_iter_save_state(&s->iter, &s->iter_state);
1000 			ret = io_setup_async_rw(req, iovec, s, true);
1001 
1002 			io = req->async_data;
1003 			if (io)
1004 				io->bytes_done += ret2;
1005 
1006 			if (kiocb->ki_flags & IOCB_WRITE)
1007 				io_req_end_write(req);
1008 			return ret ? ret : -EAGAIN;
1009 		}
1010 done:
1011 		ret = kiocb_done(req, ret2, issue_flags);
1012 	} else {
1013 copy_iov:
1014 		iov_iter_restore(&s->iter, &s->iter_state);
1015 		ret = io_setup_async_rw(req, iovec, s, false);
1016 		if (!ret) {
1017 			if (kiocb->ki_flags & IOCB_WRITE)
1018 				io_req_end_write(req);
1019 			return -EAGAIN;
1020 		}
1021 		return ret;
1022 	}
1023 	/* it's reportedly faster than delegating the null check to kfree() */
1024 	if (iovec)
1025 		kfree(iovec);
1026 	return ret;
1027 }
1028 
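/*
 * The request is being failed without going through the normal completion
 * path; make sure the posted result still accounts for any bytes already done.
 */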
1029 void io_rw_fail(struct io_kiocb *req)
1030 {
1031 	int res;
1032 
1033 	res = io_fixup_rw_res(req, req->cqe.res);
1034 	io_req_set_res(req, res, req->cqe.flags);
1035 }
1036 
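/*
 * Reap completions for IOPOLL requests: poll each file on the iopoll list
 * (batching through an io_comp_batch where supported), then flush CQEs for
 * everything that has completed. Returns the number of events found.
 */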
1037 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1038 {
1039 	struct io_wq_work_node *pos, *start, *prev;
1040 	unsigned int poll_flags = 0;
1041 	DEFINE_IO_COMP_BATCH(iob);
1042 	int nr_events = 0;
1043 
1044 	/*
1045 	 * Only spin for completions if we don't have multiple devices hanging
1046 	 * off our complete list.
1047 	 */
1048 	if (ctx->poll_multi_queue || force_nonspin)
1049 		poll_flags |= BLK_POLL_ONESHOT;
1050 
1051 	wq_list_for_each(pos, start, &ctx->iopoll_list) {
1052 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1053 		struct file *file = req->file;
1054 		int ret;
1055 
1056 		/*
1057 		 * Move completed and retryable entries to our local lists.
1058 		 * If we find a request that requires polling, break out
1059 		 * and complete those lists first, if we have entries there.
1060 		 */
1061 		if (READ_ONCE(req->iopoll_completed))
1062 			break;
1063 
1064 		if (req->opcode == IORING_OP_URING_CMD) {
1065 			struct io_uring_cmd *ioucmd;
1066 
1067 			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1068 			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1069 								poll_flags);
1070 		} else {
1071 			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1072 
1073 			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1074 		}
1075 		if (unlikely(ret < 0))
1076 			return ret;
1077 		else if (ret)
1078 			poll_flags |= BLK_POLL_ONESHOT;
1079 
1080 		/* iopoll may have completed current req */
1081 		if (!rq_list_empty(iob.req_list) ||
1082 		    READ_ONCE(req->iopoll_completed))
1083 			break;
1084 	}
1085 
1086 	if (!rq_list_empty(iob.req_list))
1087 		iob.complete(&iob);
1088 	else if (!pos)
1089 		return 0;
1090 
1091 	prev = start;
1092 	wq_list_for_each_resume(pos, prev) {
1093 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1094 
1095 		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
1096 		if (!smp_load_acquire(&req->iopoll_completed))
1097 			break;
1098 		nr_events++;
1099 		req->cqe.flags = io_put_kbuf(req, 0);
1100 	}
1101 	if (unlikely(!nr_events))
1102 		return 0;
1103 
1104 	pos = start ? start->next : ctx->iopoll_list.first;
1105 	wq_list_cut(&ctx->iopoll_list, prev, start);
1106 
1107 	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
1108 		return 0;
1109 	ctx->submit_state.compl_reqs.first = pos;
1110 	__io_submit_flush_completions(ctx);
1111 	return nr_events;
1112 }
1113