1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2f79e2abbSAndrew Morton /*
3f79e2abbSAndrew Morton * High-level sync()-related operations
4f79e2abbSAndrew Morton */
5f79e2abbSAndrew Morton
670164eb6SChristoph Hellwig #include <linux/blkdev.h>
7f79e2abbSAndrew Morton #include <linux/kernel.h>
8f79e2abbSAndrew Morton #include <linux/file.h>
9f79e2abbSAndrew Morton #include <linux/fs.h>
105a0e3ad6STejun Heo #include <linux/slab.h>
11630d9c47SPaul Gortmaker #include <linux/export.h>
12b7ed78f5SSage Weil #include <linux/namei.h>
13914e2637SAl Viro #include <linux/sched.h>
14f79e2abbSAndrew Morton #include <linux/writeback.h>
15f79e2abbSAndrew Morton #include <linux/syscalls.h>
16f79e2abbSAndrew Morton #include <linux/linkage.h>
17f79e2abbSAndrew Morton #include <linux/pagemap.h>
18cf9a2ae8SDavid Howells #include <linux/quotaops.h>
195129a469SJörn Engel #include <linux/backing-dev.h>
205a3e5cb8SJan Kara #include "internal.h"
21f79e2abbSAndrew Morton
22f79e2abbSAndrew Morton #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
23f79e2abbSAndrew Morton SYNC_FILE_RANGE_WAIT_AFTER)
24f79e2abbSAndrew Morton
25c15c54f5SJan Kara /*
26c15c54f5SJan Kara * Write out and wait upon all dirty data associated with this
27c15c54f5SJan Kara * superblock. Filesystem data as well as the underlying block
28c15c54f5SJan Kara * device. Takes the superblock lock.
29c15c54f5SJan Kara */
sync_filesystem(struct super_block * sb)3060b0680fSJan Kara int sync_filesystem(struct super_block *sb)
31c15c54f5SJan Kara {
325679897eSDarrick J. Wong int ret = 0;
33c15c54f5SJan Kara
345af7926fSChristoph Hellwig /*
355af7926fSChristoph Hellwig * We need to be protected against the filesystem going from
365af7926fSChristoph Hellwig * r/o to r/w or vice versa.
375af7926fSChristoph Hellwig */
385af7926fSChristoph Hellwig WARN_ON(!rwsem_is_locked(&sb->s_umount));
395af7926fSChristoph Hellwig
405af7926fSChristoph Hellwig /*
415af7926fSChristoph Hellwig * No point in syncing out anything if the filesystem is read-only.
425af7926fSChristoph Hellwig */
43bc98a42cSDavid Howells if (sb_rdonly(sb))
445af7926fSChristoph Hellwig return 0;
455af7926fSChristoph Hellwig
469a208ba5SChristoph Hellwig /*
479a208ba5SChristoph Hellwig * Do the filesystem syncing work. For simple filesystems
489a208ba5SChristoph Hellwig * writeback_inodes_sb(sb) just dirties buffers with inodes so we have
4970164eb6SChristoph Hellwig * to submit I/O for these buffers via sync_blockdev(). This also
509a208ba5SChristoph Hellwig * speeds up the wait == 1 case since in that case write_inode()
519a208ba5SChristoph Hellwig * methods call sync_dirty_buffer() and thus effectively write one block
529a208ba5SChristoph Hellwig * at a time.
539a208ba5SChristoph Hellwig */
549a208ba5SChristoph Hellwig writeback_inodes_sb(sb, WB_REASON_SYNC);
555679897eSDarrick J. Wong if (sb->s_op->sync_fs) {
565679897eSDarrick J. Wong ret = sb->s_op->sync_fs(sb, 0);
575679897eSDarrick J. Wong if (ret)
585679897eSDarrick J. Wong return ret;
595679897eSDarrick J. Wong }
6070164eb6SChristoph Hellwig ret = sync_blockdev_nowait(sb->s_bdev);
615679897eSDarrick J. Wong if (ret)
62c15c54f5SJan Kara return ret;
639a208ba5SChristoph Hellwig
649a208ba5SChristoph Hellwig sync_inodes_sb(sb);
655679897eSDarrick J. Wong if (sb->s_op->sync_fs) {
665679897eSDarrick J. Wong ret = sb->s_op->sync_fs(sb, 1);
675679897eSDarrick J. Wong if (ret)
685679897eSDarrick J. Wong return ret;
695679897eSDarrick J. Wong }
7070164eb6SChristoph Hellwig return sync_blockdev(sb->s_bdev);
71c15c54f5SJan Kara }
7210096fb1SAnton Altaparmakov EXPORT_SYMBOL(sync_filesystem);
73c15c54f5SJan Kara
/* Per-superblock callback: sync and wait on all inodes of a writable sb. */
static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
	if (sb_rdonly(sb))
		return;
	sync_inodes_sb(sb);
}
79b3de6531SJan Kara
sync_fs_one_sb(struct super_block * sb,void * arg)80b3de6531SJan Kara static void sync_fs_one_sb(struct super_block *sb, void *arg)
81b3de6531SJan Kara {
8232b1924bSKonstantin Khlebnikov if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) &&
8332b1924bSKonstantin Khlebnikov sb->s_op->sync_fs)
84b3de6531SJan Kara sb->s_op->sync_fs(sb, *(int *)arg);
85b3de6531SJan Kara }
86b3de6531SJan Kara
873beab0b4SZhang, Yanmin /*
884ea425b6SJan Kara * Sync everything. We start by waking flusher threads so that most of
894ea425b6SJan Kara * writeback runs on all devices in parallel. Then we sync all inodes reliably
904ea425b6SJan Kara * which effectively also waits for all flusher threads to finish doing
914ea425b6SJan Kara * writeback. At this point all data is on disk so metadata should be stable
924ea425b6SJan Kara * and we tell filesystems to sync their metadata via ->sync_fs() calls.
934ea425b6SJan Kara * Finally, we writeout all block devices because some filesystems (e.g. ext2)
944ea425b6SJan Kara * just write metadata (such as inodes or bitmaps) to block device page cache
954ea425b6SJan Kara * and do not sync it on their own in ->sync_fs().
963beab0b4SZhang, Yanmin */
ksys_sync(void)9770f68ee8SDominik Brodowski void ksys_sync(void)
98cf9a2ae8SDavid Howells {
99b3de6531SJan Kara int nowait = 0, wait = 1;
100b3de6531SJan Kara
1019ba4b2dfSJens Axboe wakeup_flusher_threads(WB_REASON_SYNC);
1020dc83bd3SJan Kara iterate_supers(sync_inodes_one_sb, NULL);
1034ea425b6SJan Kara iterate_supers(sync_fs_one_sb, &nowait);
104b3de6531SJan Kara iterate_supers(sync_fs_one_sb, &wait);
1051e03a36bSChristoph Hellwig sync_bdevs(false);
1061e03a36bSChristoph Hellwig sync_bdevs(true);
1075cee5815SJan Kara if (unlikely(laptop_mode))
1085cee5815SJan Kara laptop_sync_completion();
10970f68ee8SDominik Brodowski }
11070f68ee8SDominik Brodowski
/* sync(2): write all dirty pagecache and filesystem metadata to disk. */
SYSCALL_DEFINE0(sync)
{
	ksys_sync();
	/* sync(2) has no defined failure mode; always report success. */
	return 0;
}
116cf9a2ae8SDavid Howells
do_sync_work(struct work_struct * work)117a2a9537aSJens Axboe static void do_sync_work(struct work_struct *work)
118a2a9537aSJens Axboe {
119b3de6531SJan Kara int nowait = 0;
120b3de6531SJan Kara
1215cee5815SJan Kara /*
1225cee5815SJan Kara * Sync twice to reduce the possibility we skipped some inodes / pages
1235cee5815SJan Kara * because they were temporarily locked
1245cee5815SJan Kara */
125b3de6531SJan Kara iterate_supers(sync_inodes_one_sb, &nowait);
126b3de6531SJan Kara iterate_supers(sync_fs_one_sb, &nowait);
1271e03a36bSChristoph Hellwig sync_bdevs(false);
128b3de6531SJan Kara iterate_supers(sync_inodes_one_sb, &nowait);
129b3de6531SJan Kara iterate_supers(sync_fs_one_sb, &nowait);
1301e03a36bSChristoph Hellwig sync_bdevs(false);
1315cee5815SJan Kara printk("Emergency Sync complete\n");
132a2a9537aSJens Axboe kfree(work);
133a2a9537aSJens Axboe }
134a2a9537aSJens Axboe
emergency_sync(void)135cf9a2ae8SDavid Howells void emergency_sync(void)
136cf9a2ae8SDavid Howells {
137a2a9537aSJens Axboe struct work_struct *work;
138a2a9537aSJens Axboe
139a2a9537aSJens Axboe work = kmalloc(sizeof(*work), GFP_ATOMIC);
140a2a9537aSJens Axboe if (work) {
141a2a9537aSJens Axboe INIT_WORK(work, do_sync_work);
142a2a9537aSJens Axboe schedule_work(work);
143a2a9537aSJens Axboe }
144cf9a2ae8SDavid Howells }
145cf9a2ae8SDavid Howells
146b7ed78f5SSage Weil /*
147b7ed78f5SSage Weil * sync a single super
148b7ed78f5SSage Weil */
SYSCALL_DEFINE1(syncfs,int,fd)149b7ed78f5SSage Weil SYSCALL_DEFINE1(syncfs, int, fd)
150b7ed78f5SSage Weil {
1512903ff01SAl Viro struct fd f = fdget(fd);
152b7ed78f5SSage Weil struct super_block *sb;
153735e4ae5SJeff Layton int ret, ret2;
154b7ed78f5SSage Weil
1552903ff01SAl Viro if (!f.file)
156b7ed78f5SSage Weil return -EBADF;
157b583043eSAl Viro sb = f.file->f_path.dentry->d_sb;
158b7ed78f5SSage Weil
159b7ed78f5SSage Weil down_read(&sb->s_umount);
160b7ed78f5SSage Weil ret = sync_filesystem(sb);
161b7ed78f5SSage Weil up_read(&sb->s_umount);
162b7ed78f5SSage Weil
163735e4ae5SJeff Layton ret2 = errseq_check_and_advance(&sb->s_wb_err, &f.file->f_sb_err);
164735e4ae5SJeff Layton
1652903ff01SAl Viro fdput(f);
166735e4ae5SJeff Layton return ret ? ret : ret2;
167b7ed78f5SSage Weil }
168b7ed78f5SSage Weil
1694c728ef5SChristoph Hellwig /**
170148f948bSJan Kara * vfs_fsync_range - helper to sync a range of data & metadata to disk
1714c728ef5SChristoph Hellwig * @file: file to sync
172148f948bSJan Kara * @start: offset in bytes of the beginning of data range to sync
173148f948bSJan Kara * @end: offset in bytes of the end of data range (inclusive)
174148f948bSJan Kara * @datasync: perform only datasync
1754c728ef5SChristoph Hellwig *
176148f948bSJan Kara * Write back data in range @start..@end and metadata for @file to disk. If
177148f948bSJan Kara * @datasync is set only metadata needed to access modified file data is
178148f948bSJan Kara * written.
1794c728ef5SChristoph Hellwig */
vfs_fsync_range(struct file * file,loff_t start,loff_t end,int datasync)1808018ab05SChristoph Hellwig int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
181cf9a2ae8SDavid Howells {
1820ae45f63STheodore Ts'o struct inode *inode = file->f_mapping->host;
1830ae45f63STheodore Ts'o
18472c2d531SAl Viro if (!file->f_op->fsync)
18502c24a82SJosef Bacik return -EINVAL;
1860d07e557SChristoph Hellwig if (!datasync && (inode->i_state & I_DIRTY_TIME))
1870ae45f63STheodore Ts'o mark_inode_dirty_sync(inode);
1880f41074aSJeff Layton return file->f_op->fsync(file, start, end, datasync);
189cf9a2ae8SDavid Howells }
190148f948bSJan Kara EXPORT_SYMBOL(vfs_fsync_range);
191148f948bSJan Kara
/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file: file to sync
 * @datasync: only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk.  If @datasync is
 * set only metadata needed to access modified file data is written.
 */
int vfs_fsync(struct file *file, int datasync)
{
	/* Sync the whole file: byte 0 through the largest possible offset. */
	return vfs_fsync_range(file, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);
205cf9a2ae8SDavid Howells
do_fsync(unsigned int fd,int datasync)2064c728ef5SChristoph Hellwig static int do_fsync(unsigned int fd, int datasync)
207cf9a2ae8SDavid Howells {
2082903ff01SAl Viro struct fd f = fdget(fd);
209cf9a2ae8SDavid Howells int ret = -EBADF;
210cf9a2ae8SDavid Howells
2112903ff01SAl Viro if (f.file) {
2122903ff01SAl Viro ret = vfs_fsync(f.file, datasync);
2132903ff01SAl Viro fdput(f);
214cf9a2ae8SDavid Howells }
215cf9a2ae8SDavid Howells return ret;
216cf9a2ae8SDavid Howells }
217cf9a2ae8SDavid Howells
/* fsync(2): flush data and all metadata for @fd. */
SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
	return do_fsync(fd, 0);
}
222cf9a2ae8SDavid Howells
/* fdatasync(2): flush data plus only the metadata needed to retrieve it. */
SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
	return do_fsync(fd, 1);
}
227cf9a2ae8SDavid Howells
/*
 * Sync and/or wait upon the byte range @offset..@offset+@nbytes-1 of @file
 * as directed by @flags (SYNC_FILE_RANGE_* bits, see the discussion below).
 * An @nbytes of zero means "from @offset out to EOF".
 */
int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
		    unsigned int flags)
{
	int ret;
	struct address_space *mapping;
	loff_t endbyte;			/* inclusive */
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	/* Reject negative offsets and ranges that overflowed on addition. */
	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	/* On 32-bit pgoff_t, clamp the range to what pagecache can address. */
	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities. Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	i_mode = file_inode(file)->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out;

	mapping = file->f_mapping;
	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = file_fdatawait_range(file, offset, endbyte);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WRITE) {
		int sync_mode = WB_SYNC_NONE;

		/*
		 * All three flags set (WRITE_AND_WAIT) means data-integrity
		 * writeback, so wait on each page rather than just kicking
		 * off the I/O.
		 */
		if ((flags & SYNC_FILE_RANGE_WRITE_AND_WAIT) ==
			     SYNC_FILE_RANGE_WRITE_AND_WAIT)
			sync_mode = WB_SYNC_ALL;

		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
						 sync_mode);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
		ret = file_fdatawait_range(file, offset, endbyte);

out:
	return ret;
}
30422f96b38SJens Axboe
305cf9a2ae8SDavid Howells /*
306c553ea4fSAmir Goldstein * ksys_sync_file_range() permits finely controlled syncing over a segment of
307f79e2abbSAndrew Morton * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
308c553ea4fSAmir Goldstein * zero then ksys_sync_file_range() will operate from offset out to EOF.
309f79e2abbSAndrew Morton *
310f79e2abbSAndrew Morton * The flag bits are:
311f79e2abbSAndrew Morton *
312f79e2abbSAndrew Morton * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
313f79e2abbSAndrew Morton * before performing the write.
314f79e2abbSAndrew Morton *
315f79e2abbSAndrew Morton * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
316cce77081SPavel Machek * range which are not presently under writeback. Note that this may block for
317cce77081SPavel Machek * significant periods due to exhaustion of disk request structures.
318f79e2abbSAndrew Morton *
319f79e2abbSAndrew Morton * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
320f79e2abbSAndrew Morton * after performing the write.
321f79e2abbSAndrew Morton *
322f79e2abbSAndrew Morton * Useful combinations of the flag bits are:
323f79e2abbSAndrew Morton *
324f79e2abbSAndrew Morton * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
325c553ea4fSAmir Goldstein * in the range which were dirty on entry to ksys_sync_file_range() are placed
326f79e2abbSAndrew Morton * under writeout. This is a start-write-for-data-integrity operation.
327f79e2abbSAndrew Morton *
328f79e2abbSAndrew Morton * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
329f79e2abbSAndrew Morton * are not presently under writeout. This is an asynchronous flush-to-disk
330f79e2abbSAndrew Morton * operation. Not suitable for data integrity operations.
331f79e2abbSAndrew Morton *
332f79e2abbSAndrew Morton * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
333f79e2abbSAndrew Morton * completion of writeout of all pages in the range. This will be used after an
334f79e2abbSAndrew Morton * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
335f79e2abbSAndrew Morton * for that operation to complete and to return the result.
336f79e2abbSAndrew Morton *
337c553ea4fSAmir Goldstein * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER
338c553ea4fSAmir Goldstein * (a.k.a. SYNC_FILE_RANGE_WRITE_AND_WAIT):
339f79e2abbSAndrew Morton * a traditional sync() operation. This is a write-for-data-integrity operation
340f79e2abbSAndrew Morton * which will ensure that all pages in the range which were dirty on entry to
341c553ea4fSAmir Goldstein * ksys_sync_file_range() are written to disk. It should be noted that disk
342c553ea4fSAmir Goldstein * caches are not flushed by this call, so there are no guarantees here that the
343c553ea4fSAmir Goldstein * data will be available on disk after a crash.
344f79e2abbSAndrew Morton *
345f79e2abbSAndrew Morton *
346f79e2abbSAndrew Morton * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
347f79e2abbSAndrew Morton * I/O errors or ENOSPC conditions and will return those to the caller, after
348f79e2abbSAndrew Morton * clearing the EIO and ENOSPC flags in the address_space.
349f79e2abbSAndrew Morton *
350f79e2abbSAndrew Morton * It should be noted that none of these operations write out the file's
351f79e2abbSAndrew Morton * metadata. So unless the application is strictly performing overwrites of
352f79e2abbSAndrew Morton * already-instantiated disk blocks, there are no guarantees here that the data
353f79e2abbSAndrew Morton * will be available after a crash.
354f79e2abbSAndrew Morton */
/* Resolve @fd and hand off to sync_file_range(); see the comment above. */
int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
			 unsigned int flags)
{
	struct fd f = fdget(fd);
	int ret = -EBADF;

	if (f.file)
		ret = sync_file_range(f.file, offset, nbytes, flags);

	/* fdput() copes with an empty fd, so no second f.file check needed. */
	fdput(f);
	return ret;
}
369f79e2abbSAndrew Morton
/* sync_file_range(2): native entry point, fd then two 64-bit args then flags. */
SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
		unsigned int, flags)
{
	return ksys_sync_file_range(fd, offset, nbytes, flags);
}
375806cbae1SDominik Brodowski
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_SYNC_FILE_RANGE)
/*
 * 32-bit compat entry point: each 64-bit argument arrives split across two
 * registers and is reassembled with compat_arg_u64_glue().
 */
COMPAT_SYSCALL_DEFINE6(sync_file_range, int, fd, compat_arg_u64_dual(offset),
		       compat_arg_u64_dual(nbytes), unsigned int, flags)
{
	return ksys_sync_file_range(fd, compat_arg_u64_glue(offset),
				    compat_arg_u64_glue(nbytes), flags);
}
#endif
384*59c10c52SGuo Ren
/* It would be nice if people remember that not all the world's an i386
   when they introduce new system calls */
SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags,
		loff_t, offset, loff_t, nbytes)
{
	/*
	 * Same as sync_file_range(2) but with @flags moved before the 64-bit
	 * arguments so they stay register-pair aligned on 32-bit ABIs.
	 */
	return ksys_sync_file_range(fd, offset, nbytes, flags);
}
392