// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

static bool io_openat_force_async(struct io_open *open)
{
	/*
	 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
	 * it'll always -EAGAIN
	 */
	return open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE);
}

static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	if (io_openat_force_async(open))
		req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}
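/*
 * Illustrative only (not part of this object's build): a userspace sketch of
 * how the SQE fields consumed by the prep helpers above are filled for an
 * IORING_OP_OPENAT2 request, assuming liburing's io_uring_get_sqe() and
 * io_uring_prep_openat2() on the submission side:
 *
 *	struct open_how how = { .flags = O_RDONLY | O_CLOEXEC };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "some/path", &how);
 *
 * In the raw SQE this corresponds to sqe->fd = dfd, sqe->addr = pathname,
 * sqe->addr2 = &how and sqe->len = sizeof(how); a nonzero sqe->file_index
 * additionally requests installation into a fixed file slot rather than a
 * normal fd (rejected above when combined with O_CLOEXEC).
 */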
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		WARN_ON_ONCE(io_openat_force_async(open));
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
					  open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct fdtable *fdt;
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = rcu_dereference_protected(fdt->fd[close->fd],
					 lockdep_is_held(&files->file_lock));
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = __close_fd_get_file(close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
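/*
 * Illustrative only (not part of this object's build): the submission-side
 * counterpart of io_close_prep()/io_close() above for IORING_OP_CLOSE,
 * assuming liburing. Exactly one of a regular fd or a fixed file slot may be
 * specified; supplying both is rejected with -EINVAL:
 *
 *	io_uring_prep_close(sqe, fd);			close a regular fd
 *	io_uring_prep_close_direct(sqe, slot);		remove fixed file slot 'slot'
 *
 * In the raw SQE these map to sqe->fd = fd with sqe->file_index = 0, and
 * sqe->fd = 0 with sqe->file_index = slot + 1 (matching the 'file_slot - 1'
 * offset passed to __io_close_fixed() above), respectively.
 */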