xref: /openbmc/linux/io_uring/rw.c (revision 099ada2c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}
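
/*
 * For illustration only (not part of this file): a minimal liburing sketch
 * of the provided-buffer readv case validated above, where the iovec count
 * must be exactly 1 and only the iovec length is used (the buffer itself
 * comes from the selected group). The fd, group id and sizes are assumed
 * values, not anything this file defines.
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	static char bufs[4][4096];
 *	struct iovec iov = { .iov_base = NULL, .iov_len = 4096 };
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, bufs, 4096, 4, 1, 0);
 *	io_uring_submit(&ring);		// make buffer group 1 available
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;		// pick any buffer from group 1
 *	io_uring_submit(&ring);
 */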

int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	if (req->opcode == IORING_OP_READ_FIXED ||
	    req->opcode == IORING_OP_WRITE_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);

	/*
	 * Have to do this validation here, since by the time we're in
	 * io_read() rw->len might have changed due to buffer selection.
	 */
	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select_prep(req);
		if (ret)
			return ret;
	}

	return 0;
}
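
/*
 * For illustration only (not part of this file): a hedged sketch of how
 * userspace fills the SQE fields that io_prep_rw() reads back above
 * (off, addr, len, ioprio, rw_flags), using liburing. The fd and buffer
 * are assumed.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, 4096, 0);	// sets addr/len/off
 *	sqe->rw_flags = RWF_NOWAIT;			// ends up in rw->flags
 *	io_uring_submit(&ring);				// ioprio 0: inherit
 */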

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	kfree(io->free_iovec);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret);
	}
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}
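
/*
 * For illustration only (not part of this file): passing an offset of -1
 * from userspace selects the "use and update the file position" behaviour
 * implemented above for non-stream files, mirroring read(2)/write(2). A
 * one-line sketch, fd and buf assumed:
 *
 *	io_uring_prep_read(sqe, fd, buf, 4096, -1);	// read at f_pos
 */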

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
	req->io_task_work.func = io_queue_iowq;
	io_req_task_work_add(req);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	if (!req_has_async_data(req))
		return !io_req_prep_async(req);
	iov_iter_restore(&io->s.iter, &io->s.iter_state);
	return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume it's not safe to re-import and reissue if
	 * we're not in the original thread group (or not in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
	return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct super_block *sb = file_inode(req->file)->i_sb;

		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
		sb_end_write(sb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		kiocb_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

		req->cqe.flags |= io_put_kbuf(req, issue_flags);
	}
	io_req_task_complete(req, ts);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
			return;
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_req_io_end() from here as we're inline
			 * from the submission path.
			 */
			io_req_io_end(req);
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, issue_flags));
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req))
			io_req_task_queue_reissue(req);
		else
			io_req_task_queue_fail(req, final_ret);
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}

static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
				       struct io_rw_state *s,
				       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iov_iter *iter = &s->iter;
	u8 opcode = req->opcode;
	struct iovec *iovec;
	void __user *buf;
	size_t sqe_len;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
	    (req->flags & REQ_F_BUFFER_SELECT)) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return ERR_PTR(-ENOBUFS);
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		ret = import_ubuf(ddir, buf, sqe_len, iter);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	iovec = s->fast_iov;
	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
			      req->ctx->compat);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);
	return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct iovec **iovec, struct io_rw_state *s,
				  unsigned int issue_flags)
{
	*iovec = __io_import_iovec(rw, req, s, issue_flags);
	if (IS_ERR(*iovec))
		return PTR_ERR(*iovec);

	iov_iter_save_state(&s->iter, &s->iter_state);
	return 0;
}
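
/*
 * For illustration only (not part of this file): the fixed-buffer import
 * path above depends on buffers registered up front. A minimal liburing
 * sketch, fd and buf assumed:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *	io_uring_register_buffers(&ring, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, buf, 4096, 0, 0);	// buf_index 0
 *	io_uring_submit(&ring);
 */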

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *io = req->async_data;

	memcpy(&io->s.iter, iter, sizeof(*iter));
	io->free_iovec = iovec;
	io->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		io->s.iter.__iov = io->s.fast_iov;
		if (iter->__iov != fast_iov) {
			iov_off = iter_iov(iter) - fast_iov;
			io->s.iter.__iov += iov_off;
		}
		if (io->s.fast_iov != fast_iov)
			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     struct io_rw_state *s, bool force)
{
	if (!force && !io_cold_defs[req->opcode].prep_async)
		return 0;
	if (!req_has_async_data(req)) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov;
	int ret;

	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_DEST);
}

int io_writev_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_SOURCE);
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO and the iocb had our waitqueue armed.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return call_read_iter(file, &rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!file || !(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}
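
/*
 * For illustration only (not part of this file): the IOPOLL branch above
 * requires a ring created with IORING_SETUP_IOPOLL and a file opened with
 * O_DIRECT, otherwise it fails with -EOPNOTSUPP. A hedged sketch; the
 * device path is a made-up example:
 *
 *	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
 *	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
 */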

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_async_rw *io;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		io = req->async_data;
		s = &io->s;

		/*
		 * Safe and required to re-import if we're using provided
		 * buffers, as we dropped the selected one before retry.
		 */
		if (io_do_buffer_select(req)) {
			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
			if (unlikely(ret < 0))
				return ret;
		}

		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_READ);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req))) {
			ret = io_setup_async_rw(req, iovec, s, true);
			return ret ?: -EAGAIN;
		}
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(rw, &s->iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* if we can poll, just do that */
		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		if (iovec)
			kfree(iovec);
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&s->iter, &s->iter_state);

	ret2 = io_setup_async_rw(req, iovec, s, true);
	iovec = NULL;
	if (ret2) {
		ret = ret > 0 ? ret : ret2;
		goto done;
	}

	io = req->async_data;
	s = &io->s;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&s->iter, ret);
		if (!iov_iter_count(&s->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&s->iter, &s->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&s->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &s->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&s->iter, &s->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than to delegate to kfree */
	if (iovec)
		kfree(iovec);
	return kiocb_done(req, ret, issue_flags);
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		struct io_async_rw *io = req->async_data;

		s = &io->s;
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto copy_iov;

		/* The file path supports NOWAIT for non-direct IO only on block devices. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
			(req->flags & REQ_F_ISREG))
			goto copy_iov;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw().  Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	/*
	 * For non-polled IO, set IOCB_DIO_CALLER_COMP, stating that our handler
	 * groks deferring the completion to task context. This isn't
	 * necessary or useful for polled IO as that can always complete
	 * directly.
	 */
	if (!(kiocb->ki_flags & IOCB_HIPRI))
		kiocb->ki_flags |= IOCB_DIO_CALLER_COMP;

	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto copy_iov;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			struct io_async_rw *io;

			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						req->cqe.res, ret2);

			/*
			 * This is a partial write. The file pos has already been
			 * updated, setup the async struct to complete the request
			 * in the worker. Also update bytes_done to account for
			 * the bytes already written.
			 */
			iov_iter_save_state(&s->iter, &s->iter_state);
			ret = io_setup_async_rw(req, iovec, s, true);

			io = req->async_data;
			if (io)
				io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				kiocb_end_write(req);
			return ret ? ret : -EAGAIN;
		}
done:
		ret = kiocb_done(req, ret2, issue_flags);
	} else {
copy_iov:
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
		if (!ret) {
			if (kiocb->ki_flags & IOCB_WRITE)
				kiocb_end_write(req);
			return -EAGAIN;
		}
		return ret;
	}
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
	io_commit_cqring_flush(ctx);
	if (ctx->flags & IORING_SETUP_SQPOLL)
		io_cqring_wake(ctx);
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
								poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		if (unlikely(req->flags & REQ_F_CQE_SKIP))
			continue;

		req->cqe.flags = io_put_kbuf(req, 0);
		if (unlikely(!__io_fill_cqe_req(ctx, req))) {
			spin_lock(&ctx->completion_lock);
			io_req_cqe_overflow(req);
			spin_unlock(&ctx->completion_lock);
		}
	}

	if (unlikely(!nr_events))
		return 0;

	io_commit_cqring(ctx);
	io_cqring_ev_posted_iopoll(ctx);
	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);
	io_free_batch_list(ctx, pos);
	return nr_events;
}