1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * linux/fs/ext2/file.c
41da177e4SLinus Torvalds *
51da177e4SLinus Torvalds * Copyright (C) 1992, 1993, 1994, 1995
61da177e4SLinus Torvalds * Remy Card (card@masi.ibp.fr)
71da177e4SLinus Torvalds * Laboratoire MASI - Institut Blaise Pascal
81da177e4SLinus Torvalds * Universite Pierre et Marie Curie (Paris VI)
91da177e4SLinus Torvalds *
101da177e4SLinus Torvalds * from
111da177e4SLinus Torvalds *
121da177e4SLinus Torvalds * linux/fs/minix/file.c
131da177e4SLinus Torvalds *
141da177e4SLinus Torvalds * Copyright (C) 1991, 1992 Linus Torvalds
151da177e4SLinus Torvalds *
161da177e4SLinus Torvalds * ext2 fs regular file handling primitives
171da177e4SLinus Torvalds *
181da177e4SLinus Torvalds * 64-bit file support on 64-bit platforms by Jakub Jelinek
191da177e4SLinus Torvalds * (jj@sunsite.ms.mff.cuni.cz)
201da177e4SLinus Torvalds */
211da177e4SLinus Torvalds
221da177e4SLinus Torvalds #include <linux/time.h>
2348bde86dSJan Kara #include <linux/pagemap.h>
24c94c2acfSMatthew Wilcox #include <linux/dax.h>
25871a2931SChristoph Hellwig #include <linux/quotaops.h>
2625f4e702SChristoph Hellwig #include <linux/iomap.h>
2725f4e702SChristoph Hellwig #include <linux/uio.h>
28d0530704SRitesh Harjani (IBM) #include <linux/buffer_head.h>
291da177e4SLinus Torvalds #include "ext2.h"
301da177e4SLinus Torvalds #include "xattr.h"
311da177e4SLinus Torvalds #include "acl.h"
326e335cd7SRitesh Harjani (IBM) #include "trace.h"
331da177e4SLinus Torvalds
346cd176a5SMatthew Wilcox #ifdef CONFIG_FS_DAX
ext2_dax_read_iter(struct kiocb * iocb,struct iov_iter * to)3525f4e702SChristoph Hellwig static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
3625f4e702SChristoph Hellwig {
3725f4e702SChristoph Hellwig struct inode *inode = iocb->ki_filp->f_mapping->host;
3825f4e702SChristoph Hellwig ssize_t ret;
3925f4e702SChristoph Hellwig
4025f4e702SChristoph Hellwig if (!iov_iter_count(to))
4125f4e702SChristoph Hellwig return 0; /* skip atime */
4225f4e702SChristoph Hellwig
4325f4e702SChristoph Hellwig inode_lock_shared(inode);
4411c59c92SRoss Zwisler ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
4525f4e702SChristoph Hellwig inode_unlock_shared(inode);
4625f4e702SChristoph Hellwig
4725f4e702SChristoph Hellwig file_accessed(iocb->ki_filp);
4825f4e702SChristoph Hellwig return ret;
4925f4e702SChristoph Hellwig }
5025f4e702SChristoph Hellwig
/*
 * Write directly to persistent memory for a DAX inode.  The exclusive
 * inode lock is held across the checks, the copy, and the i_size update
 * so that an extending write publishes the new size atomically with the
 * data becoming visible.
 */
static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	/* Clamp the range against limits and handle O_APPEND etc. */
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;
	/* Drop setuid/setgid-style privileges before modifying the file. */
	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;
	ret = file_update_time(file);
	if (ret)
		goto out_unlock;

	ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
	/*
	 * Extending write: update i_size while still holding the inode
	 * lock, so concurrent readers never see data beyond i_size.
	 */
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		mark_inode_dirty(inode);
	}

out_unlock:
	inode_unlock(inode);
	/* Honour O_SYNC/O_DSYNC after the lock is dropped. */
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
8025f4e702SChristoph Hellwig
815726b27bSRoss Zwisler /*
825726b27bSRoss Zwisler * The lock ordering for ext2 DAX fault paths is:
835726b27bSRoss Zwisler *
84c1e8d7c6SMichel Lespinasse * mmap_lock (MM)
855726b27bSRoss Zwisler * sb_start_pagefault (vfs, freeze)
8670f3bad8SJan Kara * address_space->invalidate_lock
875726b27bSRoss Zwisler * address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
885726b27bSRoss Zwisler * ext2_inode_info->truncate_mutex
895726b27bSRoss Zwisler *
905726b27bSRoss Zwisler * The default page_lock and i_size verification done by non-DAX fault paths
915726b27bSRoss Zwisler * is sufficient because ext2 doesn't support hole punching.
925726b27bSRoss Zwisler */
/*
 * Handle a page fault on a DAX mapping.  Follows the lock ordering
 * documented above; also serves .page_mkwrite and .pfn_mkwrite.
 */
static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;
	/* Only shared writable faults can dirty the file. */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);

	if (write) {
		/* Freeze protection + ctime/mtime update for writes. */
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}
	/* Taken shared: excludes truncate, allows concurrent faults. */
	filemap_invalidate_lock_shared(inode->i_mapping);

	ret = dax_iomap_fault(vmf, 0, NULL, NULL, &ext2_iomap_ops);

	filemap_invalidate_unlock_shared(inode->i_mapping);
	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}
113f7ca90b1SMatthew Wilcox
/* VM operations for DAX mappings; all three entries share one handler. */
static const struct vm_operations_struct ext2_dax_vm_ops = {
	.fault		= ext2_dax_fault,
	/*
	 * .huge_fault is not supported for DAX because allocation in ext2
	 * cannot be reliably aligned to huge page sizes and so pmd faults
	 * will always fail and fail back to regular faults.
	 */
	.page_mkwrite	= ext2_dax_fault,
	.pfn_mkwrite	= ext2_dax_fault,
};
124f7ca90b1SMatthew Wilcox
ext2_file_mmap(struct file * file,struct vm_area_struct * vma)125f7ca90b1SMatthew Wilcox static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
126f7ca90b1SMatthew Wilcox {
127f7ca90b1SMatthew Wilcox if (!IS_DAX(file_inode(file)))
128f7ca90b1SMatthew Wilcox return generic_file_mmap(file, vma);
129f7ca90b1SMatthew Wilcox
130f7ca90b1SMatthew Wilcox file_accessed(file);
131f7ca90b1SMatthew Wilcox vma->vm_ops = &ext2_dax_vm_ops;
132f7ca90b1SMatthew Wilcox return 0;
133f7ca90b1SMatthew Wilcox }
134f7ca90b1SMatthew Wilcox #else
135f7ca90b1SMatthew Wilcox #define ext2_file_mmap generic_file_mmap
136f7ca90b1SMatthew Wilcox #endif
137f7ca90b1SMatthew Wilcox
1381da177e4SLinus Torvalds /*
139a6739af8SJan Kara * Called when filp is released. This happens when all file descriptors
140a6739af8SJan Kara * for a single struct file are closed. Note that different open() calls
141a6739af8SJan Kara * for the same file yield different struct file structures.
1421da177e4SLinus Torvalds */
ext2_release_file(struct inode * inode,struct file * filp)1431da177e4SLinus Torvalds static int ext2_release_file (struct inode * inode, struct file * filp)
1441da177e4SLinus Torvalds {
145a686cd89SMartin J. Bligh if (filp->f_mode & FMODE_WRITE) {
146a686cd89SMartin J. Bligh mutex_lock(&EXT2_I(inode)->truncate_mutex);
147a686cd89SMartin J. Bligh ext2_discard_reservation(inode);
148a686cd89SMartin J. Bligh mutex_unlock(&EXT2_I(inode)->truncate_mutex);
149a686cd89SMartin J. Bligh }
1501da177e4SLinus Torvalds return 0;
1511da177e4SLinus Torvalds }
1521da177e4SLinus Torvalds
/*
 * fsync/fdatasync: flush data and associated buffers; escalate an I/O
 * error to an ext2 filesystem error since its origin is unknown.
 */
int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct super_block *sb = file->f_mapping->host->i_sb;
	int err;

	err = generic_buffers_fsync(file, start, end, datasync);
	if (err == -EIO) {
		/* We don't really know where the IO error happened... */
		ext2_error(sb, __func__,
			   "detected IO error when writing metadata buffers");
	}
	return err;
}
16548bde86dSJan Kara
ext2_dio_read_iter(struct kiocb * iocb,struct iov_iter * to)166fb5de435SRitesh Harjani (IBM) static ssize_t ext2_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
167fb5de435SRitesh Harjani (IBM) {
168fb5de435SRitesh Harjani (IBM) struct file *file = iocb->ki_filp;
169fb5de435SRitesh Harjani (IBM) struct inode *inode = file->f_mapping->host;
170fb5de435SRitesh Harjani (IBM) ssize_t ret;
171fb5de435SRitesh Harjani (IBM)
1726e335cd7SRitesh Harjani (IBM) trace_ext2_dio_read_begin(iocb, to, 0);
173fb5de435SRitesh Harjani (IBM) inode_lock_shared(inode);
174fb5de435SRitesh Harjani (IBM) ret = iomap_dio_rw(iocb, to, &ext2_iomap_ops, NULL, 0, NULL, 0);
175fb5de435SRitesh Harjani (IBM) inode_unlock_shared(inode);
1766e335cd7SRitesh Harjani (IBM) trace_ext2_dio_read_end(iocb, to, ret);
177fb5de435SRitesh Harjani (IBM)
178fb5de435SRitesh Harjani (IBM) return ret;
179fb5de435SRitesh Harjani (IBM) }
180fb5de435SRitesh Harjani (IBM)
/*
 * Completion callback for O_DIRECT writes.
 *
 * If we are extending the file, we have to update i_size here before
 * page cache gets invalidated in iomap_dio_rw().  This prevents racing
 * buffered reads from zeroing out too much from page cache pages.
 * All extending writes happen synchronously with the inode lock held
 * by ext2_dio_write_iter(), so the update is safe here.
 */
static int ext2_dio_write_end_io(struct kiocb *iocb, ssize_t size,
		int error, unsigned int flags)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (!error) {
		loff_t end = iocb->ki_pos + size;

		if (end > i_size_read(inode)) {
			i_size_write(inode, end);
			mark_inode_dirty(inode);
		}
	}
	trace_ext2_dio_write_endio(iocb, size, error);
	return error;
}
207fb5de435SRitesh Harjani (IBM)
/* iomap DIO callbacks for writes; only completion handling is needed. */
static const struct iomap_dio_ops ext2_dio_write_ops = {
	.end_io = ext2_dio_write_end_io,
};
211fb5de435SRitesh Harjani (IBM)
/*
 * O_DIRECT write via iomap, with fallback to buffered I/O when direct
 * I/O cannot complete the whole request.  The exclusive inode lock is
 * held for the entire operation.
 */
static ssize_t ext2_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;
	unsigned int flags = 0;
	unsigned long blocksize = inode->i_sb->s_blocksize;
	loff_t offset = iocb->ki_pos;
	loff_t count = iov_iter_count(from);
	ssize_t status = 0;	/* bytes written by the buffered fallback */

	trace_ext2_dio_write_begin(iocb, from, 0);
	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	/* Remove privileges and update timestamps before writing. */
	ret = kiocb_modified(iocb);
	if (ret)
		goto out_unlock;

	/* use IOMAP_DIO_FORCE_WAIT for unaligned or extending writes */
	if (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode) ||
	    (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(from), blocksize)))
		flags |= IOMAP_DIO_FORCE_WAIT;

	ret = iomap_dio_rw(iocb, from, &ext2_iomap_ops, &ext2_dio_write_ops,
			   flags, NULL, 0);

	/* ENOTBLK is magic return value for fallback to buffered-io */
	if (ret == -ENOTBLK)
		ret = 0;

	/* Trim blocks instantiated beyond the (unchanged) i_size on error. */
	if (ret < 0 && ret != -EIOCBQUEUED)
		ext2_write_failed(inode->i_mapping, offset + count);

	/* handle case for partial write and for fallback to buffered write */
	if (ret >= 0 && iov_iter_count(from)) {
		loff_t pos, endbyte;
		int ret2;

		iocb->ki_flags &= ~IOCB_DIRECT;
		pos = iocb->ki_pos;
		status = generic_perform_write(iocb, from);
		if (unlikely(status < 0)) {
			ret = status;
			goto out_unlock;
		}

		ret += status;
		endbyte = pos + status - 1;
		/*
		 * Write the fallback range back and drop it from the page
		 * cache so the file looks as if it had been written with
		 * O_DIRECT throughout.
		 */
		ret2 = filemap_write_and_wait_range(inode->i_mapping, pos,
						    endbyte);
		if (!ret2)
			invalidate_mapping_pages(inode->i_mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		if (ret > 0)
			generic_write_sync(iocb, ret);
	}

out_unlock:
	inode_unlock(inode);
	if (status)
		trace_ext2_dio_write_buff_end(iocb, from, status);
	trace_ext2_dio_write_end(iocb, from, ret);
	return ret;
}
280fb5de435SRitesh Harjani (IBM)
ext2_file_read_iter(struct kiocb * iocb,struct iov_iter * to)28125f4e702SChristoph Hellwig static ssize_t ext2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
28225f4e702SChristoph Hellwig {
28325f4e702SChristoph Hellwig #ifdef CONFIG_FS_DAX
28425f4e702SChristoph Hellwig if (IS_DAX(iocb->ki_filp->f_mapping->host))
28525f4e702SChristoph Hellwig return ext2_dax_read_iter(iocb, to);
28625f4e702SChristoph Hellwig #endif
287fb5de435SRitesh Harjani (IBM) if (iocb->ki_flags & IOCB_DIRECT)
288fb5de435SRitesh Harjani (IBM) return ext2_dio_read_iter(iocb, to);
289fb5de435SRitesh Harjani (IBM)
29025f4e702SChristoph Hellwig return generic_file_read_iter(iocb, to);
29125f4e702SChristoph Hellwig }
29225f4e702SChristoph Hellwig
ext2_file_write_iter(struct kiocb * iocb,struct iov_iter * from)29325f4e702SChristoph Hellwig static ssize_t ext2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
29425f4e702SChristoph Hellwig {
29525f4e702SChristoph Hellwig #ifdef CONFIG_FS_DAX
29625f4e702SChristoph Hellwig if (IS_DAX(iocb->ki_filp->f_mapping->host))
29725f4e702SChristoph Hellwig return ext2_dax_write_iter(iocb, from);
29825f4e702SChristoph Hellwig #endif
299fb5de435SRitesh Harjani (IBM) if (iocb->ki_flags & IOCB_DIRECT)
300fb5de435SRitesh Harjani (IBM) return ext2_dio_write_iter(iocb, from);
301fb5de435SRitesh Harjani (IBM)
30225f4e702SChristoph Hellwig return generic_file_write_iter(iocb, from);
30325f4e702SChristoph Hellwig }
30425f4e702SChristoph Hellwig
/* File operations for regular ext2 files. */
const struct file_operations ext2_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ext2_file_read_iter,
	.write_iter	= ext2_file_write_iter,
	.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.mmap		= ext2_file_mmap,
	/* dquot_file_open pins quota structures for writable opens. */
	.open		= dquot_file_open,
	.release	= ext2_release_file,
	.fsync		= ext2_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
};
3211da177e4SLinus Torvalds
/* Inode operations for regular ext2 files (xattr, ACL, fiemap, ...). */
const struct inode_operations ext2_file_inode_operations = {
	.listxattr	= ext2_listxattr,
	.getattr	= ext2_getattr,
	.setattr	= ext2_setattr,
	.get_inode_acl	= ext2_get_acl,
	.set_acl	= ext2_set_acl,
	.fiemap		= ext2_fiemap,
	.fileattr_get	= ext2_fileattr_get,
	.fileattr_set	= ext2_fileattr_set,
};
332