// SPDX-License-Identifier: GPL-2.0
/*
 * File operations for Coda.
 * Original version: (C) 1996 Peter Braam
 * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University
 *
 * Carnegie Mellon encourages users of this code to contribute improvements
 * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>

#include <linux/coda.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_int.h"

struct coda_vm_ops {
	atomic_t refcnt;
	struct file *coda_file;
	const struct vm_operations_struct *host_vm_ops;
	struct vm_operations_struct vm_ops;
};

static ssize_t
coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *coda_file = iocb->ki_filp;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	loff_t ki_pos = iocb->ki_pos;
	size_t count = iov_iter_count(to);
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ki_pos, CODA_ACCESS_TYPE_READ);
	if (ret)
		goto finish_read;

	ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0);

finish_read:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    count, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
	return ret;
}

static ssize_t
coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *coda_file = iocb->ki_filp;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *host_file = cfi->cfi_container;
	loff_t ki_pos = iocb->ki_pos;
	size_t count = iov_iter_count(to);
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ki_pos, CODA_ACCESS_TYPE_WRITE);
	if (ret)
		goto finish_write;

	file_start_write(host_file);
	inode_lock(coda_inode);
	ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
	coda_inode->i_size = file_inode(host_file)->i_size;
	coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
	coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode);
	inode_unlock(coda_inode);
	file_end_write(host_file);

finish_write:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    count, ki_pos, CODA_ACCESS_TYPE_WRITE_FINISH);
	return ret;
}

static void
coda_vm_open(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	atomic_inc(&cvm_ops->refcnt);

	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
		cvm_ops->host_vm_ops->open(vma);
}

static void
coda_vm_close(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
		cvm_ops->host_vm_ops->close(vma);

	if (atomic_dec_and_test(&cvm_ops->refcnt)) {
		vma->vm_ops = cvm_ops->host_vm_ops;
		fput(cvm_ops->coda_file);
		kfree(cvm_ops);
	}
}

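/*
 * coda_file_mmap() maps the cache container (host) file rather than the
 * Coda file itself: vma->vm_file is switched to the host file before
 * call_mmap(), and the host filesystem's vm_operations are wrapped in a
 * refcounted struct coda_vm_ops so coda_vm_open()/coda_vm_close() can
 * keep a reference on the Coda file until the last mapping goes away.
 */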
static int
coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
{
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *host_file = cfi->cfi_container;
	struct inode *host_inode = file_inode(host_file);
	struct coda_inode_info *cii;
	struct coda_vm_ops *cvm_ops;
	loff_t ppos;
	size_t count;
	int ret;

	if (!host_file->f_op->mmap)
		return -ENODEV;

	if (WARN_ON(coda_file != vma->vm_file))
		return -EIO;

	count = vma->vm_end - vma->vm_start;
	ppos = vma->vm_pgoff * PAGE_SIZE;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ppos, CODA_ACCESS_TYPE_MMAP);
	if (ret)
		return ret;

	cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
	if (!cvm_ops)
		return -ENOMEM;

	cii = ITOC(coda_inode);
	spin_lock(&cii->c_lock);
	coda_file->f_mapping = host_file->f_mapping;
	if (coda_inode->i_mapping == &coda_inode->i_data)
		coda_inode->i_mapping = host_inode->i_mapping;

	/* only allow additional mmaps as long as userspace isn't changing
	 * the container file on us! */
	else if (coda_inode->i_mapping != host_inode->i_mapping) {
		spin_unlock(&cii->c_lock);
		kfree(cvm_ops);
		return -EBUSY;
	}

	/* keep track of how often the coda_inode/host_file has been mmapped */
	cii->c_mapcount++;
	cfi->cfi_mapcount++;
	spin_unlock(&cii->c_lock);

	vma->vm_file = get_file(host_file);
	ret = call_mmap(vma->vm_file, vma);

	if (ret) {
		/* if call_mmap fails, our caller will put coda_file so we
		 * should drop the reference to the host_file that we got.
		 */
		fput(host_file);
		kfree(cvm_ops);
	} else {
		/* here we add redirects for the open/close vm_operations */
		cvm_ops->host_vm_ops = vma->vm_ops;
		if (vma->vm_ops)
			cvm_ops->vm_ops = *vma->vm_ops;

		cvm_ops->vm_ops.open = coda_vm_open;
		cvm_ops->vm_ops.close = coda_vm_close;
		cvm_ops->coda_file = coda_file;
		atomic_set(&cvm_ops->refcnt, 1);

		vma->vm_ops = &cvm_ops->vm_ops;
	}
	return ret;
}

int coda_open(struct inode *coda_inode, struct file *coda_file)
{
	struct file *host_file = NULL;
	int error;
	unsigned short flags = coda_file->f_flags & (~O_EXCL);
	unsigned short coda_flags = coda_flags_to_cflags(flags);
	struct coda_file_info *cfi;

	cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
	if (!cfi)
		return -ENOMEM;

	error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
			   &host_file);
	if (!host_file)
		error = -EIO;

	if (error) {
		kfree(cfi);
		return error;
	}

	host_file->f_flags |= coda_file->f_flags & (O_APPEND | O_SYNC);

	cfi->cfi_magic = CODA_MAGIC;
	cfi->cfi_mapcount = 0;
	cfi->cfi_container = host_file;
	/* assume access intents are supported unless we hear otherwise */
	cfi->cfi_access_intent = true;

	BUG_ON(coda_file->private_data != NULL);
	coda_file->private_data = cfi;
	return 0;
}

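/*
 * coda_release() undoes coda_open(): it reports the close to Venus,
 * unwinds the i_mapping redirection set up by coda_file_mmap() once the
 * last mapping through this file is gone, and drops the reference held
 * on the cache container file.
 */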
int coda_release(struct inode *coda_inode, struct file *coda_file)
{
	unsigned short flags = (coda_file->f_flags) & (~O_EXCL);
	unsigned short coda_flags = coda_flags_to_cflags(flags);
	struct coda_file_info *cfi;
	struct coda_inode_info *cii;
	struct inode *host_inode;
	int err;

	cfi = coda_ftoc(coda_file);

	err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
			  coda_flags, coda_file->f_cred->fsuid);

	host_inode = file_inode(cfi->cfi_container);
	cii = ITOC(coda_inode);

	/* did we mmap this file? */
	spin_lock(&cii->c_lock);
	if (coda_inode->i_mapping == &host_inode->i_data) {
		cii->c_mapcount -= cfi->cfi_mapcount;
		if (!cii->c_mapcount)
			coda_inode->i_mapping = &coda_inode->i_data;
	}
	spin_unlock(&cii->c_lock);

	fput(cfi->cfi_container);
	kfree(coda_file->private_data);
	coda_file->private_data = NULL;

	/* VFS fput ignores the return value from file_operations->release, so
	 * there is no use returning an error here */
	return 0;
}

int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
{
	struct file *host_file;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi;
	int err;

	if (!(S_ISREG(coda_inode->i_mode) || S_ISDIR(coda_inode->i_mode) ||
	      S_ISLNK(coda_inode->i_mode)))
		return -EINVAL;

	err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end);
	if (err)
		return err;
	inode_lock(coda_inode);

	cfi = coda_ftoc(coda_file);
	host_file = cfi->cfi_container;

	err = vfs_fsync(host_file, datasync);
	if (!err && !datasync)
		err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
	inode_unlock(coda_inode);

	return err;
}

const struct file_operations coda_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= coda_file_read_iter,
	.write_iter	= coda_file_write_iter,
	.mmap		= coda_file_mmap,
	.open		= coda_open,
	.release	= coda_release,
	.fsync		= coda_fsync,
	.splice_read	= generic_file_splice_read,
};