/*
 *  linux/fs/ext4/file.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * Wait until all outstanding unwritten-extent AIO/DIO against this inode
 * has completed, i.e. until i_aiodio_unwritten drops to zero.
 */
static void ext4_aiodio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If two AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;
	size_t count = iov_length(iov, nr_segs);
	loff_t final_size = pos + count;

	if (pos >= inode->i_size)
		return 0;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;

	return 0;
}
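/*
 * For example (illustrative numbers): with a 4096-byte block size,
 * blockmask == 0x0fff.  An AIO write of 512 bytes at pos 4096 (inside
 * i_size) has (pos & blockmask) == 0 but (final_size & blockmask) ==
 * (4608 & 0x0fff) == 512, so ext4_unaligned_aio() returns 1 and
 * ext4_file_write() below will serialize it.  A 4096-byte write at pos
 * 8192 leaves both expressions zero and runs without the mutex.  IO
 * starting at or beyond i_size is never flagged as unaligned: blocks
 * past EOF hold no committed data that a concurrent zero-out could
 * destroy.
 */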

static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	int unaligned_aio = 0;
	ssize_t ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if (pos > sbi->s_bitmap_maxbytes ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0))
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	} else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
		   !is_sync_kiocb(iocb))) {
		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
	}

	/*
	 * Unaligned direct AIO must be serialized; see the comment above
	 * ext4_unaligned_aio().
	 */
	if (unaligned_aio) {
		static unsigned long unaligned_warn_time;

		/* Warn about this once per day */
		if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
			ext4_msg(inode->i_sb, KERN_WARNING,
				 "Unaligned AIO/DIO on inode %ld by %s; "
				 "performance will be poor.",
				 inode->i_ino, current->comm);
		mutex_lock(ext4_aio_mutex(inode));
		ext4_aiodio_wait(inode);
	}

	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

	if (unaligned_aio)
		mutex_unlock(ext4_aio_mutex(inode));

	return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	/* A readpage op is required to fault pages into the mapping. */
	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ext4_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_mark_super_dirty(sb);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present.
	 */
	if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
		struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);

		spin_lock(&inode->i_lock);
		if (!ei->jinode) {
			if (!jinode) {
				spin_unlock(&inode->i_lock);
				return -ENOMEM;
			}
			ei->jinode = jinode;
			jbd2_journal_init_jbd_inode(ei->jinode, inode);
			jinode = NULL;
		}
		spin_unlock(&inode->i_lock);
		if (unlikely(jinode != NULL))
			jbd2_free_inode(jinode);
	}
	return dquot_file_open(inode, filp);
}
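/*
 * Note on the jinode setup in ext4_file_open() above: the jbd2_inode is
 * allocated with GFP_KERNEL *before* i_lock is taken, because the
 * allocation may sleep and sleeping under a spinlock is forbidden.  If
 * another opener raced in and installed ei->jinode first, the losing
 * thread frees its now-unneeded allocation after dropping the lock.
 */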

/*
 * ext4_llseek() wraps generic_file_llseek_size() so that both
 * block-mapped and extent-mapped files are checked against the correct
 * maxbytes limit; it is otherwise identical to generic_file_llseek().
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	return generic_file_llseek_size(file, offset, origin, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= ext4_file_write,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
#ifdef CONFIG_EXT4_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.get_acl	= ext4_get_acl,
	.fiemap		= ext4_fiemap,
};
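/*
 * Neither table is referenced in this file; ext4's inode setup code
 * installs them on every regular file, roughly (sketch following
 * ext4_iget() in fs/ext4/inode.c):
 *
 *	if (S_ISREG(inode->i_mode)) {
 *		inode->i_op = &ext4_file_inode_operations;
 *		inode->i_fop = &ext4_file_operations;
 *	}
 */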