xref: /openbmc/linux/fs/ext2/file.c (revision fb5de435)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *  linux/fs/ext2/file.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright (C) 1992, 1993, 1994, 1995
61da177e4SLinus Torvalds  * Remy Card (card@masi.ibp.fr)
71da177e4SLinus Torvalds  * Laboratoire MASI - Institut Blaise Pascal
81da177e4SLinus Torvalds  * Universite Pierre et Marie Curie (Paris VI)
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  *  from
111da177e4SLinus Torvalds  *
121da177e4SLinus Torvalds  *  linux/fs/minix/file.c
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  *  Copyright (C) 1991, 1992  Linus Torvalds
151da177e4SLinus Torvalds  *
161da177e4SLinus Torvalds  *  ext2 fs regular file handling primitives
171da177e4SLinus Torvalds  *
181da177e4SLinus Torvalds  *  64-bit file support on 64-bit platforms by Jakub Jelinek
191da177e4SLinus Torvalds  * 	(jj@sunsite.ms.mff.cuni.cz)
201da177e4SLinus Torvalds  */
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds #include <linux/time.h>
2348bde86dSJan Kara #include <linux/pagemap.h>
24c94c2acfSMatthew Wilcox #include <linux/dax.h>
25871a2931SChristoph Hellwig #include <linux/quotaops.h>
2625f4e702SChristoph Hellwig #include <linux/iomap.h>
2725f4e702SChristoph Hellwig #include <linux/uio.h>
28d0530704SRitesh Harjani (IBM) #include <linux/buffer_head.h>
291da177e4SLinus Torvalds #include "ext2.h"
301da177e4SLinus Torvalds #include "xattr.h"
311da177e4SLinus Torvalds #include "acl.h"
321da177e4SLinus Torvalds 
336cd176a5SMatthew Wilcox #ifdef CONFIG_FS_DAX
/*
 * Read from a DAX file.  The shared inode lock serializes against
 * exclusive-locked writers (ext2_dax_write_iter); atime is only touched
 * when the iterator actually requested data.
 */
static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
4925f4e702SChristoph Hellwig 
/*
 * Write to a DAX file.  The exclusive inode lock serializes writers and
 * protects the i_size extension; O_SYNC/O_DSYNC semantics are applied by
 * generic_write_sync() after the lock is dropped.
 */
static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;
	/* strip setuid/setgid/caps bits, as any write to the file would */
	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;
	ret = file_update_time(file);
	if (ret)
		goto out_unlock;

	ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
	/* extend i_size if the write went past the current end of file */
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		mark_inode_dirty(inode);
	}

out_unlock:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
7925f4e702SChristoph Hellwig 
805726b27bSRoss Zwisler /*
815726b27bSRoss Zwisler  * The lock ordering for ext2 DAX fault paths is:
825726b27bSRoss Zwisler  *
83c1e8d7c6SMichel Lespinasse  * mmap_lock (MM)
845726b27bSRoss Zwisler  *   sb_start_pagefault (vfs, freeze)
8570f3bad8SJan Kara  *     address_space->invalidate_lock
865726b27bSRoss Zwisler  *       address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
875726b27bSRoss Zwisler  *         ext2_inode_info->truncate_mutex
885726b27bSRoss Zwisler  *
895726b27bSRoss Zwisler  * The default page_lock and i_size verification done by non-DAX fault paths
905726b27bSRoss Zwisler  * is sufficient because ext2 doesn't support hole punching.
915726b27bSRoss Zwisler  */
/*
 * Handle a page fault on a DAX mapping.  Also wired up as .page_mkwrite
 * and .pfn_mkwrite in ext2_dax_vm_ops.  See the lock ordering comment
 * above for why the locks are taken in this sequence.
 */
static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;
	/* only shared writable faults dirty the file and need freeze protection */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}
	/* keep mapping invalidation away while the fault installs an entry */
	filemap_invalidate_lock_shared(inode->i_mapping);

	ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);

	filemap_invalidate_unlock_shared(inode->i_mapping);
	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}
112f7ca90b1SMatthew Wilcox 
static const struct vm_operations_struct ext2_dax_vm_ops = {
	.fault		= ext2_dax_fault,
	/*
	 * .huge_fault is not supported for DAX because allocation in ext2
	 * cannot be reliably aligned to huge page sizes and so pmd faults
	 * will always fail and fail back to regular faults.
	 */
	/* write faults on present mappings reuse the same handler */
	.page_mkwrite	= ext2_dax_fault,
	.pfn_mkwrite	= ext2_dax_fault,
};
123f7ca90b1SMatthew Wilcox 
124f7ca90b1SMatthew Wilcox static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
125f7ca90b1SMatthew Wilcox {
126f7ca90b1SMatthew Wilcox 	if (!IS_DAX(file_inode(file)))
127f7ca90b1SMatthew Wilcox 		return generic_file_mmap(file, vma);
128f7ca90b1SMatthew Wilcox 
129f7ca90b1SMatthew Wilcox 	file_accessed(file);
130f7ca90b1SMatthew Wilcox 	vma->vm_ops = &ext2_dax_vm_ops;
131f7ca90b1SMatthew Wilcox 	return 0;
132f7ca90b1SMatthew Wilcox }
133f7ca90b1SMatthew Wilcox #else
134f7ca90b1SMatthew Wilcox #define ext2_file_mmap	generic_file_mmap
135f7ca90b1SMatthew Wilcox #endif
136f7ca90b1SMatthew Wilcox 
1371da177e4SLinus Torvalds /*
138a6739af8SJan Kara  * Called when filp is released. This happens when all file descriptors
139a6739af8SJan Kara  * for a single struct file are closed. Note that different open() calls
140a6739af8SJan Kara  * for the same file yield different struct file structures.
1411da177e4SLinus Torvalds  */
1421da177e4SLinus Torvalds static int ext2_release_file (struct inode * inode, struct file * filp)
1431da177e4SLinus Torvalds {
144a686cd89SMartin J. Bligh 	if (filp->f_mode & FMODE_WRITE) {
145a686cd89SMartin J. Bligh 		mutex_lock(&EXT2_I(inode)->truncate_mutex);
146a686cd89SMartin J. Bligh 		ext2_discard_reservation(inode);
147a686cd89SMartin J. Bligh 		mutex_unlock(&EXT2_I(inode)->truncate_mutex);
148a686cd89SMartin J. Bligh 	}
1491da177e4SLinus Torvalds 	return 0;
1501da177e4SLinus Torvalds }
1511da177e4SLinus Torvalds 
15202c24a82SJosef Bacik int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
15348bde86dSJan Kara {
15448bde86dSJan Kara 	int ret;
1557ea80859SChristoph Hellwig 	struct super_block *sb = file->f_mapping->host->i_sb;
15648bde86dSJan Kara 
157d0530704SRitesh Harjani (IBM) 	ret = generic_buffers_fsync(file, start, end, datasync);
158dac257f7SJeff Layton 	if (ret == -EIO)
15948bde86dSJan Kara 		/* We don't really know where the IO error happened... */
16048bde86dSJan Kara 		ext2_error(sb, __func__,
16148bde86dSJan Kara 			   "detected IO error when writing metadata buffers");
16248bde86dSJan Kara 	return ret;
16348bde86dSJan Kara }
16448bde86dSJan Kara 
/*
 * Direct-IO read.  The shared inode lock serializes against
 * exclusive-locked writers for the duration of the transfer.
 */
static ssize_t ext2_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &ext2_iomap_ops, NULL, 0, NULL, 0);
	inode_unlock_shared(inode);

	return ret;
}
177*fb5de435SRitesh Harjani (IBM) 
/*
 * Direct-IO write completion callback: on success, extend i_size for
 * writes that went past EOF.  Returns @error unchanged so iomap can
 * propagate it.
 */
static int ext2_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (error)
		goto out;

	/*
	 * If we are extending the file, we have to update i_size here before
	 * page cache gets invalidated in iomap_dio_rw(). This prevents racing
	 * buffered reads from zeroing out too much from page cache pages.
	 * Note that all extending writes always happens synchronously with
	 * inode lock held by ext2_dio_write_iter(). So it is safe to update
	 * inode size here for extending file writes.
	 */
	pos += size;
	if (pos > i_size_read(inode)) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
out:
	return error;
}
203*fb5de435SRitesh Harjani (IBM) 
/* Completion hooks for direct-IO writes issued by ext2_dio_write_iter(). */
static const struct iomap_dio_ops ext2_dio_write_ops = {
	.end_io = ext2_dio_write_end_io,
};
207*fb5de435SRitesh Harjani (IBM) 
/*
 * Direct-IO write.  Runs under the exclusive inode lock for its whole
 * duration (including the buffered fallback), so extending writes and
 * the i_size update in ext2_dio_write_end_io() are serialized.  On a
 * short direct write, or when iomap asks for a buffered fallback
 * (-ENOTBLK), the remainder is written through the page cache and then
 * flushed/invalidated so the pages don't alias later direct IO.
 */
static ssize_t ext2_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;
	unsigned int flags = 0;
	unsigned long blocksize = inode->i_sb->s_blocksize;
	loff_t offset = iocb->ki_pos;
	loff_t count = iov_iter_count(from);

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	ret = kiocb_modified(iocb);
	if (ret)
		goto out_unlock;

	/* use IOMAP_DIO_FORCE_WAIT for unaligned or extending writes */
	if (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode) ||
	   (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(from), blocksize)))
		flags |= IOMAP_DIO_FORCE_WAIT;

	ret = iomap_dio_rw(iocb, from, &ext2_iomap_ops, &ext2_dio_write_ops,
			   flags, NULL, 0);

	/* ENOTBLK is magic return value for fallback to buffered-io */
	if (ret == -ENOTBLK)
		ret = 0;

	/* trim blocks instantiated beyond the failed write's range */
	if (ret < 0 && ret != -EIOCBQUEUED)
		ext2_write_failed(inode->i_mapping, offset + count);

	/* handle case for partial write and for fallback to buffered write */
	if (ret >= 0 && iov_iter_count(from)) {
		loff_t pos, endbyte;
		ssize_t status;
		int ret2;

		iocb->ki_flags &= ~IOCB_DIRECT;
		pos = iocb->ki_pos;
		status = generic_perform_write(iocb, from);
		if (unlikely(status < 0)) {
			ret = status;
			goto out_unlock;
		}

		iocb->ki_pos += status;
		ret += status;
		endbyte = pos + status - 1;
		/* write the fallback range back and drop it from page cache */
		ret2 = filemap_write_and_wait_range(inode->i_mapping, pos,
						    endbyte);
		if (!ret2)
			invalidate_mapping_pages(inode->i_mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		if (ret > 0)
			generic_write_sync(iocb, ret);
	}

out_unlock:
	inode_unlock(inode);
	return ret;
}
273*fb5de435SRitesh Harjani (IBM) 
/*
 * read_iter dispatcher: DAX inodes first, then O_DIRECT, then the
 * generic page-cache read path.
 */
static ssize_t ext2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return ext2_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext2_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}
28525f4e702SChristoph Hellwig 
/*
 * write_iter dispatcher: DAX inodes first, then O_DIRECT, then the
 * generic buffered write path.
 */
static ssize_t ext2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return ext2_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext2_dio_write_iter(iocb, from);

	return generic_file_write_iter(iocb, from);
}
29725f4e702SChristoph Hellwig 
/* File operations for ext2 regular files. */
const struct file_operations ext2_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ext2_file_read_iter,
	.write_iter	= ext2_file_write_iter,
	.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.mmap		= ext2_file_mmap,
	.open		= dquot_file_open,
	.release	= ext2_release_file,
	.fsync		= ext2_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};
3141da177e4SLinus Torvalds 
/* Inode operations for ext2 regular files. */
const struct inode_operations ext2_file_inode_operations = {
	.listxattr	= ext2_listxattr,
	.getattr	= ext2_getattr,
	.setattr	= ext2_setattr,
	.get_inode_acl	= ext2_get_acl,
	.set_acl	= ext2_set_acl,
	.fiemap		= ext2_fiemap,
	.fileattr_get	= ext2_fileattr_get,
	.fileattr_set	= ext2_fileattr_set,
};
325