xref: /openbmc/linux/io_uring/sync.c (revision 2d99a7ec)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>
#include <linux/fsnotify.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sync.h"

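/*
 * Command data for fsync, sync_file_range and fallocate requests, stored
 * in the io_kiocb command area and retrieved via io_kiocb_to_cmd().
 */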
struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

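/*
 * Prepare an IORING_OP_SYNC_FILE_RANGE request: reject SQE fields the
 * opcode does not use, stash the offset, length and range flags, and
 * force the request onto the async path since the operation may block.
 */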
int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->len);
	sync->flags = READ_ONCE(sqe->sync_range_flags);
	req->flags |= REQ_F_FORCE_ASYNC;

	return 0;
}

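/*
 * Issue the prepared sync_file_range request and post the result as the
 * request's completion.
 */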
int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* sync_file_range always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

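/*
 * Prepare an IORING_OP_FSYNC request: IORING_FSYNC_DATASYNC is the only
 * valid flag, and an optional offset/length pair limits the sync range.
 */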
int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(sync->flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

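/*
 * Issue the prepared fsync request via vfs_fsync_range(). If off + len
 * is not positive the range extends to LLONG_MAX, i.e. the whole file.
 */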
int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	loff_t end = sync->off + sync->len;
	int ret;

	/* fsync always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
				sync->flags & IORING_FSYNC_DATASYNC);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

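/*
 * Prepare an IORING_OP_FALLOCATE request. Note the SQE field mapping:
 * the allocation length is carried in sqe->addr and the fallocate mode
 * in sqe->len.
 */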
int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->addr);
	sync->mode = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

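/*
 * Issue the prepared fallocate request and generate an fsnotify modify
 * event on success.
 */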
int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* fallocate always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
	if (ret >= 0)
		fsnotify_modify(req->file);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}