// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

struct io_open {
	struct file *file;
	int dfd;
	u32 file_slot;
	struct filename *filename;
	struct open_how how;
	unsigned long nofile;
};

struct io_close {
	struct file *file;
	int fd;
	u32 file_slot;
};

static bool io_openat_force_async(struct io_open *open)
{
	/*
	 * Don't bother trying a nonblocking attempt for an O_TRUNC, O_CREAT,
	 * or O_TMPFILE open, it'll always return -EAGAIN.
	 */
	return open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE);
}

/*
 * Prep work shared by OPENAT and OPENAT2: open->how must already have been
 * filled in by the caller. Copies the filename in from userspace and decides
 * whether the open has to be punted to async.
 */
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open->how should already be initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	if (io_openat_force_async(open))
		req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		WARN_ON_ONCE(io_openat_force_async(open));
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but it seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by the application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;
	fsnotify_open(file);

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
					  open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

/* OPENAT shares the issue path with OPENAT2; only the prep side differs */
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

/* Remove the fixed file at @offset, under the ring's submission lock */
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct fdtable *fdt;
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = rcu_dereference_protected(fdt->fd[close->fd],
					 lockdep_is_held(&files->file_lock));
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = __close_fd_get_file(close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}