// SPDX-License-Identifier: GPL-2.0
/*
 * File operations for Coda.
 * Original version: (C) 1996 Peter Braam
 * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University
 *
 * Carnegie Mellon encourages users of this code to contribute improvements
 * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
 */

#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/splice.h>

#include <linux/coda.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_int.h"

struct coda_vm_ops {
	refcount_t refcnt;
	struct file *coda_file;
	const struct vm_operations_struct *host_vm_ops;
	struct vm_operations_struct vm_ops;
};

static ssize_t
coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *coda_file = iocb->ki_filp;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	loff_t ki_pos = iocb->ki_pos;
	size_t count = iov_iter_count(to);
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ki_pos, CODA_ACCESS_TYPE_READ);
	if (ret)
		goto finish_read;

	ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0);

finish_read:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    count, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
	return ret;
}

static ssize_t
coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *coda_file = iocb->ki_filp;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *host_file = cfi->cfi_container;
	loff_t ki_pos = iocb->ki_pos;
	size_t count = iov_iter_count(to);
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ki_pos, CODA_ACCESS_TYPE_WRITE);
	if (ret)
		goto finish_write;

	file_start_write(host_file);
	inode_lock(coda_inode);
	ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
	coda_inode->i_size = file_inode(host_file)->i_size;
	coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
	coda_inode->i_mtime = inode_set_ctime_current(coda_inode);
	inode_unlock(coda_inode);
	file_end_write(host_file);

finish_write:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    count, ki_pos, CODA_ACCESS_TYPE_WRITE_FINISH);
	return ret;
}

static ssize_t
coda_file_splice_read(struct file *coda_file, loff_t *ppos,
		      struct pipe_inode_info *pipe,
		      size_t len, unsigned int flags)
{
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *in = cfi->cfi_container;
	loff_t ki_pos = *ppos;
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  len, ki_pos, CODA_ACCESS_TYPE_READ);
	if (ret)
		goto finish_read;

	ret = vfs_splice_read(in, ppos, pipe, len, flags);

finish_read:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    len, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
	return ret;
}

static void
coda_vm_open(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	refcount_inc(&cvm_ops->refcnt);

	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
		cvm_ops->host_vm_ops->open(vma);
}

static void
coda_vm_close(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
		cvm_ops->host_vm_ops->close(vma);

	if (refcount_dec_and_test(&cvm_ops->refcnt)) {
		vma->vm_ops = cvm_ops->host_vm_ops;
		fput(cvm_ops->coda_file);
		kfree(cvm_ops);
	}
}

static int
coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
{
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *host_file = cfi->cfi_container;
	struct inode *host_inode = file_inode(host_file);
	struct coda_inode_info *cii;
	struct coda_vm_ops *cvm_ops;
	loff_t ppos;
	size_t count;
	int ret;

	if (!host_file->f_op->mmap)
		return -ENODEV;

	if (WARN_ON(coda_file != vma->vm_file))
		return -EIO;

	count = vma->vm_end - vma->vm_start;
	ppos = vma->vm_pgoff * PAGE_SIZE;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ppos, CODA_ACCESS_TYPE_MMAP);
	if (ret)
		return ret;

	cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
	if (!cvm_ops)
		return -ENOMEM;

	cii = ITOC(coda_inode);
	spin_lock(&cii->c_lock);
	coda_file->f_mapping = host_file->f_mapping;
	if (coda_inode->i_mapping == &coda_inode->i_data)
		coda_inode->i_mapping = host_inode->i_mapping;

	/* only allow additional mmaps as long as userspace isn't changing
	 * the container file on us! */
	else if (coda_inode->i_mapping != host_inode->i_mapping) {
		spin_unlock(&cii->c_lock);
		kfree(cvm_ops);
		return -EBUSY;
	}

	/* keep track of how often the coda_inode/host_file has been mmapped */
	cii->c_mapcount++;
	cfi->cfi_mapcount++;
	spin_unlock(&cii->c_lock);

	vma->vm_file = get_file(host_file);
	ret = call_mmap(vma->vm_file, vma);

	if (ret) {
		/* if call_mmap fails, our caller will put host_file so we
		 * should drop the reference to the coda_file that we got.
		 */
		fput(coda_file);
		kfree(cvm_ops);
	} else {
		/* here we add redirects for the open/close vm_operations */
		cvm_ops->host_vm_ops = vma->vm_ops;
		if (vma->vm_ops)
			cvm_ops->vm_ops = *vma->vm_ops;

		cvm_ops->vm_ops.open = coda_vm_open;
		cvm_ops->vm_ops.close = coda_vm_close;
		cvm_ops->coda_file = coda_file;
		refcount_set(&cvm_ops->refcnt, 1);

		vma->vm_ops = &cvm_ops->vm_ops;
	}
	return ret;
}

int coda_open(struct inode *coda_inode, struct file *coda_file)
{
	struct file *host_file = NULL;
	int error;
	unsigned short flags = coda_file->f_flags & (~O_EXCL);
	unsigned short coda_flags = coda_flags_to_cflags(flags);
	struct coda_file_info *cfi;

	cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
	if (!cfi)
		return -ENOMEM;

	error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
			   &host_file);
	if (!host_file)
		error = -EIO;

	if (error) {
		kfree(cfi);
		return error;
	}

	host_file->f_flags |= coda_file->f_flags & (O_APPEND | O_SYNC);

	cfi->cfi_magic = CODA_MAGIC;
	cfi->cfi_mapcount = 0;
	cfi->cfi_container = host_file;
	/* assume access intents are supported unless we hear otherwise */
	cfi->cfi_access_intent = true;

	BUG_ON(coda_file->private_data != NULL);
	coda_file->private_data = cfi;
	return 0;
}

int coda_release(struct inode *coda_inode, struct file *coda_file)
{
	unsigned short flags = (coda_file->f_flags) & (~O_EXCL);
	unsigned short coda_flags = coda_flags_to_cflags(flags);
	struct coda_file_info *cfi;
	struct coda_inode_info *cii;
	struct inode *host_inode;

	cfi = coda_ftoc(coda_file);

	venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
		    coda_flags, coda_file->f_cred->fsuid);

	host_inode = file_inode(cfi->cfi_container);
	cii = ITOC(coda_inode);

	/* did we mmap this file? */
	spin_lock(&cii->c_lock);
	if (coda_inode->i_mapping == &host_inode->i_data) {
		cii->c_mapcount -= cfi->cfi_mapcount;
		if (!cii->c_mapcount)
			coda_inode->i_mapping = &coda_inode->i_data;
	}
	spin_unlock(&cii->c_lock);

	fput(cfi->cfi_container);
	kfree(coda_file->private_data);
	coda_file->private_data = NULL;

	/* VFS fput ignores the return value from file_operations->release, so
	 * there is no use returning an error here */
	return 0;
}

int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
{
	struct file *host_file;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi;
	int err;

	if (!(S_ISREG(coda_inode->i_mode) || S_ISDIR(coda_inode->i_mode) ||
	      S_ISLNK(coda_inode->i_mode)))
		return -EINVAL;

	err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end);
	if (err)
		return err;
	inode_lock(coda_inode);

	cfi = coda_ftoc(coda_file);
	host_file = cfi->cfi_container;

	err = vfs_fsync(host_file, datasync);
	if (!err && !datasync)
		err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
	inode_unlock(coda_inode);

	return err;
}

const struct file_operations coda_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= coda_file_read_iter,
	.write_iter	= coda_file_write_iter,
	.mmap		= coda_file_mmap,
	.open		= coda_open,
	.release	= coda_release,
	.fsync		= coda_fsync,
	.splice_read	= coda_file_splice_read,
};