1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   *  linux/fs/read_write.c
4   *
5   *  Copyright (C) 1991, 1992  Linus Torvalds
6   */
7  
8  #include <linux/slab.h>
9  #include <linux/stat.h>
10  #include <linux/sched/xacct.h>
11  #include <linux/fcntl.h>
12  #include <linux/file.h>
13  #include <linux/uio.h>
14  #include <linux/fsnotify.h>
15  #include <linux/security.h>
16  #include <linux/export.h>
17  #include <linux/syscalls.h>
18  #include <linux/pagemap.h>
19  #include <linux/splice.h>
20  #include <linux/compat.h>
21  #include <linux/mount.h>
22  #include <linux/fs.h>
23  #include "internal.h"
24  
25  #include <linux/uaccess.h>
26  #include <asm/unistd.h>
27  
28  const struct file_operations generic_ro_fops = {
29  	.llseek		= generic_file_llseek,
30  	.read_iter	= generic_file_read_iter,
31  	.mmap		= generic_file_readonly_mmap,
32  	.splice_read	= generic_file_splice_read,
33  };
34  
35  EXPORT_SYMBOL(generic_ro_fops);
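
/*
 * Illustrative sketch (editorial, not part of this file): a simple
 * read-only, page-cache backed filesystem can point its regular-file
 * inodes straight at the table above when instantiating them, e.g.
 *
 *	inode->i_fop = &generic_ro_fops;
 *
 * provided the inode's address_space operations can bring pages
 * uptodate, since read_iter/mmap/splice_read here all go through the
 * page cache.
 */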
36  
37  static inline bool unsigned_offsets(struct file *file)
38  {
39  	return file->f_mode & FMODE_UNSIGNED_OFFSET;
40  }
41  
42  /**
43   * vfs_setpos - update the file offset for lseek
44   * @file:	file structure in question
45   * @offset:	file offset to seek to
46   * @maxsize:	maximum file size
47   *
48   * This is a low-level filesystem helper for updating the file offset to
49   * the value specified by @offset if the given offset is valid and it is
50   * not equal to the current file offset.
51   *
52   * Return the specified offset on success and -EINVAL on invalid offset.
53   */
54  loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
55  {
56  	if (offset < 0 && !unsigned_offsets(file))
57  		return -EINVAL;
58  	if (offset > maxsize)
59  		return -EINVAL;
60  
61  	if (offset != file->f_pos) {
62  		file->f_pos = offset;
63  		file->f_version = 0;
64  	}
65  	return offset;
66  }
67  EXPORT_SYMBOL(vfs_setpos);
68  
69  /**
70   * generic_file_llseek_size - generic llseek implementation for regular files
71   * @file:	file structure to seek on
72   * @offset:	file offset to seek to
73   * @whence:	type of seek
74   * @maxsize:	max size of this file in file system
75   * @eof:	offset used for SEEK_END position
76   *
77   * This is a variant of generic_file_llseek that allows passing in a custom
78   * maximum file size and a custom EOF position, e.g. for hashed directories.
79   *
80   * Synchronization:
81   * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
82   * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
83   * read/writes behave like SEEK_SET against seeks.
84   */
85  loff_t
86  generic_file_llseek_size(struct file *file, loff_t offset, int whence,
87  		loff_t maxsize, loff_t eof)
88  {
89  	switch (whence) {
90  	case SEEK_END:
91  		offset += eof;
92  		break;
93  	case SEEK_CUR:
94  		/*
95  		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
96  		 * position-querying operation.  Avoid rewriting the "same"
97  		 * f_pos value back to the file because a concurrent read(),
98  		 * write() or lseek() might have altered it
99  		 */
100  		if (offset == 0)
101  			return file->f_pos;
102  		/*
103  		 * f_lock protects against read/modify/write race with other
104  		 * SEEK_CURs. Note that parallel writes and reads behave
105  		 * like SEEK_SET.
106  		 */
107  		spin_lock(&file->f_lock);
108  		offset = vfs_setpos(file, file->f_pos + offset, maxsize);
109  		spin_unlock(&file->f_lock);
110  		return offset;
111  	case SEEK_DATA:
112  		/*
113  		 * In the generic case the entire file is data, so as long as
114  		 * offset isn't at the end of the file then the offset is data.
115  		 */
116  		if ((unsigned long long)offset >= eof)
117  			return -ENXIO;
118  		break;
119  	case SEEK_HOLE:
120  		/*
121  		 * There is a virtual hole at the end of the file, so as long as
122  		 * offset isn't i_size or larger, return i_size.
123  		 */
124  		if ((unsigned long long)offset >= eof)
125  			return -ENXIO;
126  		offset = eof;
127  		break;
128  	}
129  
130  	return vfs_setpos(file, offset, maxsize);
131  }
132  EXPORT_SYMBOL(generic_file_llseek_size);
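
/*
 * Illustrative sketch (editorial, MY_HASH_EOF is hypothetical): a
 * filesystem whose directory offsets are hash cookies rather than
 * byte counts could call generic_file_llseek_size() from its own
 * ->llseek with custom bounds, e.g.
 *
 *	return generic_file_llseek_size(file, offset, whence,
 *					MY_HASH_EOF, MY_HASH_EOF);
 *
 * so that SEEK_END and the range check work against the cookie space
 * instead of i_size.
 */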
133  
134  /**
135   * generic_file_llseek - generic llseek implementation for regular files
136   * @file:	file structure to seek on
137   * @offset:	file offset to seek to
138   * @whence:	type of seek
139   *
140   * This is a generic implementation of ->llseek usable for all normal local
141   * filesystems.  It just updates the file offset to the value specified by
142   * @offset and @whence.
143   */
144  loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
145  {
146  	struct inode *inode = file->f_mapping->host;
147  
148  	return generic_file_llseek_size(file, offset, whence,
149  					inode->i_sb->s_maxbytes,
150  					i_size_read(inode));
151  }
152  EXPORT_SYMBOL(generic_file_llseek);
153  
154  /**
155   * fixed_size_llseek - llseek implementation for fixed-sized devices
156   * @file:	file structure to seek on
157   * @offset:	file offset to seek to
158   * @whence:	type of seek
159   * @size:	size of the file
160   *
161   */
162  loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
163  {
164  	switch (whence) {
165  	case SEEK_SET: case SEEK_CUR: case SEEK_END:
166  		return generic_file_llseek_size(file, offset, whence,
167  						size, size);
168  	default:
169  		return -EINVAL;
170  	}
171  }
172  EXPORT_SYMBOL(fixed_size_llseek);
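
/*
 * Illustrative sketch (editorial): a driver exposing a fixed-size
 * region, say a 4 KiB EEPROM, could implement ->llseek as
 *
 *	static loff_t my_llseek(struct file *file, loff_t off, int whence)
 *	{
 *		return fixed_size_llseek(file, off, whence, 4096);
 *	}
 *
 * so SEEK_END resolves against the 4096-byte size and offsets beyond
 * it are rejected by vfs_setpos().
 */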
173  
174  /**
175   * no_seek_end_llseek - llseek implementation for devices that do not support SEEK_END
176   * @file:	file structure to seek on
177   * @offset:	file offset to seek to
178   * @whence:	type of seek
179   *
180   */
181  loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
182  {
183  	switch (whence) {
184  	case SEEK_SET: case SEEK_CUR:
185  		return generic_file_llseek_size(file, offset, whence,
186  						OFFSET_MAX, 0);
187  	default:
188  		return -EINVAL;
189  	}
190  }
191  EXPORT_SYMBOL(no_seek_end_llseek);
192  
193  /**
194   * no_seek_end_llseek_size - llseek implementation for devices that do not support SEEK_END
195   * @file:	file structure to seek on
196   * @offset:	file offset to seek to
197   * @whence:	type of seek
198   * @size:	maximal offset allowed
199   *
200   */
201  loff_t no_seek_end_llseek_size(struct file *file, loff_t offset, int whence, loff_t size)
202  {
203  	switch (whence) {
204  	case SEEK_SET: case SEEK_CUR:
205  		return generic_file_llseek_size(file, offset, whence,
206  						size, 0);
207  	default:
208  		return -EINVAL;
209  	}
210  }
211  EXPORT_SYMBOL(no_seek_end_llseek_size);
212  
213  /**
214   * noop_llseek - No Operation Performed llseek implementation
215   * @file:	file structure to seek on
216   * @offset:	file offset to seek to
217   * @whence:	type of seek
218   *
219   * This is an implementation of ->llseek usable for the rare special case when
220   * userspace expects the seek to succeed but the (device) file is actually not
221   * able to perform the seek. In this case you use noop_llseek() instead of
222   * falling back to the default implementation of ->llseek.
223   */
224  loff_t noop_llseek(struct file *file, loff_t offset, int whence)
225  {
226  	return file->f_pos;
227  }
228  EXPORT_SYMBOL(noop_llseek);
229  
230  loff_t no_llseek(struct file *file, loff_t offset, int whence)
231  {
232  	return -ESPIPE;
233  }
234  EXPORT_SYMBOL(no_llseek);
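
/*
 * Illustrative comparison (editorial): in a file_operations table,
 *
 *	.llseek = noop_llseek,	lseek() "succeeds", f_pos is unchanged
 *	.llseek = no_llseek,	lseek() fails with -ESPIPE
 *
 * noop_llseek() is for the rare devices where userspace insists on
 * seeking and expects success even though the seek has no effect.
 */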
235  
236  loff_t default_llseek(struct file *file, loff_t offset, int whence)
237  {
238  	struct inode *inode = file_inode(file);
239  	loff_t retval;
240  
241  	inode_lock(inode);
242  	switch (whence) {
243  		case SEEK_END:
244  			offset += i_size_read(inode);
245  			break;
246  		case SEEK_CUR:
247  			if (offset == 0) {
248  				retval = file->f_pos;
249  				goto out;
250  			}
251  			offset += file->f_pos;
252  			break;
253  		case SEEK_DATA:
254  			/*
255  			 * In the generic case the entire file is data, so as
256  			 * long as offset isn't at the end of the file then the
257  			 * offset is data.
258  			 */
259  			if (offset >= inode->i_size) {
260  				retval = -ENXIO;
261  				goto out;
262  			}
263  			break;
264  		case SEEK_HOLE:
265  			/*
266  			 * There is a virtual hole at the end of the file, so
267  			 * as long as offset isn't i_size or larger, return
268  			 * i_size.
269  			 */
270  			if (offset >= inode->i_size) {
271  				retval = -ENXIO;
272  				goto out;
273  			}
274  			offset = inode->i_size;
275  			break;
276  	}
277  	retval = -EINVAL;
278  	if (offset >= 0 || unsigned_offsets(file)) {
279  		if (offset != file->f_pos) {
280  			file->f_pos = offset;
281  			file->f_version = 0;
282  		}
283  		retval = offset;
284  	}
285  out:
286  	inode_unlock(inode);
287  	return retval;
288  }
289  EXPORT_SYMBOL(default_llseek);
290  
291  loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
292  {
293  	loff_t (*fn)(struct file *, loff_t, int);
294  
295  	fn = no_llseek;
296  	if (file->f_mode & FMODE_LSEEK) {
297  		if (file->f_op->llseek)
298  			fn = file->f_op->llseek;
299  	}
300  	return fn(file, offset, whence);
301  }
302  EXPORT_SYMBOL(vfs_llseek);
303  
304  static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence)
305  {
306  	off_t retval;
307  	struct fd f = fdget_pos(fd);
308  	if (!f.file)
309  		return -EBADF;
310  
311  	retval = -EINVAL;
312  	if (whence <= SEEK_MAX) {
313  		loff_t res = vfs_llseek(f.file, offset, whence);
314  		retval = res;
315  		if (res != (loff_t)retval)
316  			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
317  	}
318  	fdput_pos(f);
319  	return retval;
320  }
321  
322  SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
323  {
324  	return ksys_lseek(fd, offset, whence);
325  }
326  
327  #ifdef CONFIG_COMPAT
328  COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
329  {
330  	return ksys_lseek(fd, offset, whence);
331  }
332  #endif
333  
334  #if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT) || \
335  	defined(__ARCH_WANT_SYS_LLSEEK)
336  SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
337  		unsigned long, offset_low, loff_t __user *, result,
338  		unsigned int, whence)
339  {
340  	int retval;
341  	struct fd f = fdget_pos(fd);
342  	loff_t offset;
343  
344  	if (!f.file)
345  		return -EBADF;
346  
347  	retval = -EINVAL;
348  	if (whence > SEEK_MAX)
349  		goto out_putf;
350  
351  	offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
352  			whence);
353  
354  	retval = (int)offset;
355  	if (offset >= 0) {
356  		retval = -EFAULT;
357  		if (!copy_to_user(result, &offset, sizeof(offset)))
358  			retval = 0;
359  	}
360  out_putf:
361  	fdput_pos(f);
362  	return retval;
363  }
364  #endif
365  
366  int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
367  {
368  	struct inode *inode;
369  	int retval = -EINVAL;
370  
371  	inode = file_inode(file);
372  	if (unlikely((ssize_t) count < 0))
373  		return retval;
374  
375  	/*
376  	 * ranged mandatory locking does not apply to streams - it makes sense
377  	 * only for files where position has a meaning.
378  	 */
379  	if (ppos) {
380  		loff_t pos = *ppos;
381  
382  		if (unlikely(pos < 0)) {
383  			if (!unsigned_offsets(file))
384  				return retval;
385  			if (count >= -pos) /* both values are in 0..LLONG_MAX */
386  				return -EOVERFLOW;
387  		} else if (unlikely((loff_t) (pos + count) < 0)) {
388  			if (!unsigned_offsets(file))
389  				return retval;
390  		}
391  
392  		if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
393  			retval = locks_mandatory_area(inode, file, pos, pos + count - 1,
394  					read_write == READ ? F_RDLCK : F_WRLCK);
395  			if (retval < 0)
396  				return retval;
397  		}
398  	}
399  
400  	return security_file_permission(file,
401  				read_write == READ ? MAY_READ : MAY_WRITE);
402  }
403  
404  static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
405  {
406  	struct iovec iov = { .iov_base = buf, .iov_len = len };
407  	struct kiocb kiocb;
408  	struct iov_iter iter;
409  	ssize_t ret;
410  
411  	init_sync_kiocb(&kiocb, filp);
412  	kiocb.ki_pos = (ppos ? *ppos : 0);
413  	iov_iter_init(&iter, READ, &iov, 1, len);
414  
415  	ret = call_read_iter(filp, &kiocb, &iter);
416  	BUG_ON(ret == -EIOCBQUEUED);
417  	if (ppos)
418  		*ppos = kiocb.ki_pos;
419  	return ret;
420  }
421  
422  ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
423  {
424  	mm_segment_t old_fs = get_fs();
425  	ssize_t ret;
426  
427  	if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ)))
428  		return -EINVAL;
429  	if (!(file->f_mode & FMODE_CAN_READ))
430  		return -EINVAL;
431  
432  	if (count > MAX_RW_COUNT)
433  		count =  MAX_RW_COUNT;
434  	set_fs(KERNEL_DS);
435  	if (file->f_op->read)
436  		ret = file->f_op->read(file, (void __user *)buf, count, pos);
437  	else if (file->f_op->read_iter)
438  		ret = new_sync_read(file, (void __user *)buf, count, pos);
439  	else
440  		ret = -EINVAL;
441  	set_fs(old_fs);
442  	if (ret > 0) {
443  		fsnotify_access(file);
444  		add_rchar(current, ret);
445  	}
446  	inc_syscr(current);
447  	return ret;
448  }
449  
450  ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
451  {
452  	ssize_t ret;
453  
454  	ret = rw_verify_area(READ, file, pos, count);
455  	if (ret)
456  		return ret;
457  	return __kernel_read(file, buf, count, pos);
458  }
459  EXPORT_SYMBOL(kernel_read);
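
/*
 * Illustrative sketch (editorial, buf/buf_size hypothetical): reading
 * a file from kernel code mirrors read(2) but with an explicit
 * position, e.g.
 *
 *	loff_t pos = 0;
 *	ssize_t n = kernel_read(file, buf, buf_size, &pos);
 *
 * where a positive return value means n bytes were copied and pos has
 * advanced accordingly.
 */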
460  
461  ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
462  {
463  	ssize_t ret;
464  
465  	if (!(file->f_mode & FMODE_READ))
466  		return -EBADF;
467  	if (!(file->f_mode & FMODE_CAN_READ))
468  		return -EINVAL;
469  	if (unlikely(!access_ok(buf, count)))
470  		return -EFAULT;
471  
472  	ret = rw_verify_area(READ, file, pos, count);
473  	if (ret)
474  		return ret;
475  	if (count > MAX_RW_COUNT)
476  		count =  MAX_RW_COUNT;
477  
478  	if (file->f_op->read)
479  		ret = file->f_op->read(file, buf, count, pos);
480  	else if (file->f_op->read_iter)
481  		ret = new_sync_read(file, buf, count, pos);
482  	else
483  		ret = -EINVAL;
484  	if (ret > 0) {
485  		fsnotify_access(file);
486  		add_rchar(current, ret);
487  	}
488  	inc_syscr(current);
489  	return ret;
490  }
491  
492  static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
493  {
494  	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
495  	struct kiocb kiocb;
496  	struct iov_iter iter;
497  	ssize_t ret;
498  
499  	init_sync_kiocb(&kiocb, filp);
500  	kiocb.ki_pos = (ppos ? *ppos : 0);
501  	iov_iter_init(&iter, WRITE, &iov, 1, len);
502  
503  	ret = call_write_iter(filp, &kiocb, &iter);
504  	BUG_ON(ret == -EIOCBQUEUED);
505  	if (ret > 0 && ppos)
506  		*ppos = kiocb.ki_pos;
507  	return ret;
508  }
509  
510  /* caller is responsible for file_start_write/file_end_write */
511  ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
512  {
513  	mm_segment_t old_fs;
514  	const char __user *p;
515  	ssize_t ret;
516  
517  	if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
518  		return -EBADF;
519  	if (!(file->f_mode & FMODE_CAN_WRITE))
520  		return -EINVAL;
521  
522  	old_fs = get_fs();
523  	set_fs(KERNEL_DS);
524  	p = (__force const char __user *)buf;
525  	if (count > MAX_RW_COUNT)
526  		count =  MAX_RW_COUNT;
527  	if (file->f_op->write)
528  		ret = file->f_op->write(file, p, count, pos);
529  	else if (file->f_op->write_iter)
530  		ret = new_sync_write(file, p, count, pos);
531  	else
532  		ret = -EINVAL;
533  	set_fs(old_fs);
534  	if (ret > 0) {
535  		fsnotify_modify(file);
536  		add_wchar(current, ret);
537  	}
538  	inc_syscw(current);
539  	return ret;
540  }
541  
542  ssize_t kernel_write(struct file *file, const void *buf, size_t count,
543  			    loff_t *pos)
544  {
545  	ssize_t ret;
546  
547  	ret = rw_verify_area(WRITE, file, pos, count);
548  	if (ret)
549  		return ret;
550  
551  	file_start_write(file);
552  	ret =  __kernel_write(file, buf, count, pos);
553  	file_end_write(file);
554  	return ret;
555  }
556  EXPORT_SYMBOL(kernel_write);
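
/*
 * Illustrative sketch (editorial, data/data_len hypothetical): unlike
 * __kernel_write() above, kernel_write() takes the write-freeze
 * protection itself, so a typical caller is simply
 *
 *	loff_t pos = 0;
 *	ssize_t n = kernel_write(file, data, data_len, &pos);
 *
 * Callers that already hold file_start_write() use __kernel_write()
 * directly instead.
 */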
557  
558  ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
559  {
560  	ssize_t ret;
561  
562  	if (!(file->f_mode & FMODE_WRITE))
563  		return -EBADF;
564  	if (!(file->f_mode & FMODE_CAN_WRITE))
565  		return -EINVAL;
566  	if (unlikely(!access_ok(buf, count)))
567  		return -EFAULT;
568  
569  	ret = rw_verify_area(WRITE, file, pos, count);
570  	if (ret)
571  		return ret;
572  	if (count > MAX_RW_COUNT)
573  		count =  MAX_RW_COUNT;
574  	file_start_write(file);
575  	if (file->f_op->write)
576  		ret = file->f_op->write(file, buf, count, pos);
577  	else if (file->f_op->write_iter)
578  		ret = new_sync_write(file, buf, count, pos);
579  	else
580  		ret = -EINVAL;
581  	if (ret > 0) {
582  		fsnotify_modify(file);
583  		add_wchar(current, ret);
584  	}
585  	inc_syscw(current);
586  	file_end_write(file);
587  	return ret;
588  }
589  
590  /* file_ppos returns &file->f_pos or NULL if the file is a stream */
591  static inline loff_t *file_ppos(struct file *file)
592  {
593  	return file->f_mode & FMODE_STREAM ? NULL : &file->f_pos;
594  }
595  
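/*
 * Editorial note: ksys_read()/ksys_write() below sample the position
 * into a local "pos" and copy it back to f_pos only after a
 * successful call; stream-like files (FMODE_STREAM) get a NULL ppos
 * and are never repositioned.
 */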
596  ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
597  {
598  	struct fd f = fdget_pos(fd);
599  	ssize_t ret = -EBADF;
600  
601  	if (f.file) {
602  		loff_t pos, *ppos = file_ppos(f.file);
603  		if (ppos) {
604  			pos = *ppos;
605  			ppos = &pos;
606  		}
607  		ret = vfs_read(f.file, buf, count, ppos);
608  		if (ret >= 0 && ppos)
609  			f.file->f_pos = pos;
610  		fdput_pos(f);
611  	}
612  	return ret;
613  }
614  
615  SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
616  {
617  	return ksys_read(fd, buf, count);
618  }
619  
620  ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count)
621  {
622  	struct fd f = fdget_pos(fd);
623  	ssize_t ret = -EBADF;
624  
625  	if (f.file) {
626  		loff_t pos, *ppos = file_ppos(f.file);
627  		if (ppos) {
628  			pos = *ppos;
629  			ppos = &pos;
630  		}
631  		ret = vfs_write(f.file, buf, count, ppos);
632  		if (ret >= 0 && ppos)
633  			f.file->f_pos = pos;
634  		fdput_pos(f);
635  	}
636  
637  	return ret;
638  }
639  
640  SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
641  		size_t, count)
642  {
643  	return ksys_write(fd, buf, count);
644  }
645  
646  ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count,
647  		     loff_t pos)
648  {
649  	struct fd f;
650  	ssize_t ret = -EBADF;
651  
652  	if (pos < 0)
653  		return -EINVAL;
654  
655  	f = fdget(fd);
656  	if (f.file) {
657  		ret = -ESPIPE;
658  		if (f.file->f_mode & FMODE_PREAD)
659  			ret = vfs_read(f.file, buf, count, &pos);
660  		fdput(f);
661  	}
662  
663  	return ret;
664  }
665  
666  SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
667  			size_t, count, loff_t, pos)
668  {
669  	return ksys_pread64(fd, buf, count, pos);
670  }
671  
672  ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
673  		      size_t count, loff_t pos)
674  {
675  	struct fd f;
676  	ssize_t ret = -EBADF;
677  
678  	if (pos < 0)
679  		return -EINVAL;
680  
681  	f = fdget(fd);
682  	if (f.file) {
683  		ret = -ESPIPE;
684  		if (f.file->f_mode & FMODE_PWRITE)
685  			ret = vfs_write(f.file, buf, count, &pos);
686  		fdput(f);
687  	}
688  
689  	return ret;
690  }
691  
692  SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
693  			 size_t, count, loff_t, pos)
694  {
695  	return ksys_pwrite64(fd, buf, count, pos);
696  }
697  
698  static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
699  		loff_t *ppos, int type, rwf_t flags)
700  {
701  	struct kiocb kiocb;
702  	ssize_t ret;
703  
704  	init_sync_kiocb(&kiocb, filp);
705  	ret = kiocb_set_rw_flags(&kiocb, flags);
706  	if (ret)
707  		return ret;
708  	kiocb.ki_pos = (ppos ? *ppos : 0);
709  
710  	if (type == READ)
711  		ret = call_read_iter(filp, &kiocb, iter);
712  	else
713  		ret = call_write_iter(filp, &kiocb, iter);
714  	BUG_ON(ret == -EIOCBQUEUED);
715  	if (ppos)
716  		*ppos = kiocb.ki_pos;
717  	return ret;
718  }
719  
720  /* Do it by hand, with file-ops */
721  static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
722  		loff_t *ppos, int type, rwf_t flags)
723  {
724  	ssize_t ret = 0;
725  
726  	if (flags & ~RWF_HIPRI)
727  		return -EOPNOTSUPP;
728  
729  	while (iov_iter_count(iter)) {
730  		struct iovec iovec = iov_iter_iovec(iter);
731  		ssize_t nr;
732  
733  		if (type == READ) {
734  			nr = filp->f_op->read(filp, iovec.iov_base,
735  					      iovec.iov_len, ppos);
736  		} else {
737  			nr = filp->f_op->write(filp, iovec.iov_base,
738  					       iovec.iov_len, ppos);
739  		}
740  
741  		if (nr < 0) {
742  			if (!ret)
743  				ret = nr;
744  			break;
745  		}
746  		ret += nr;
747  		if (nr != iovec.iov_len)
748  			break;
749  		iov_iter_advance(iter, nr);
750  	}
751  
752  	return ret;
753  }
754  
755  /**
756   * rw_copy_check_uvector() - Copy an array of &struct iovec from userspace
757   *     into the kernel and check that it is valid.
758   *
759   * @type: One of %CHECK_IOVEC_ONLY, %READ, or %WRITE.
760   * @uvector: Pointer to the userspace array.
761   * @nr_segs: Number of elements in userspace array.
762   * @fast_segs: Number of elements in @fast_pointer.
763   * @fast_pointer: Pointer to (usually small on-stack) kernel array.
764   * @ret_pointer: (output parameter) Pointer to a variable that will point to
765   *     either @fast_pointer, a newly allocated kernel array, or NULL,
766   *     depending on which array was used.
767   *
768   * This function copies an array of &struct iovec of @nr_segs from
769   * userspace into the kernel and checks that each element is valid (e.g.
770   * it does not point to a kernel address or cause overflow by being too
771   * large, etc.).
772   *
773   * As an optimization, the caller may provide a pointer to a small
774   * on-stack array in @fast_pointer, typically %UIO_FASTIOV elements long
775   * (the size of this array, or 0 if unused, should be given in @fast_segs).
776   *
777   * @ret_pointer will always point to the array that was used, so the
778   * caller must take care not to call kfree() on it e.g. in case the
779   * @fast_pointer array was used and it was allocated on the stack.
780   *
781   * Return: The total number of bytes covered by the iovec array on success
782   *   or a negative error code on error.
783   */
784  ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
785  			      unsigned long nr_segs, unsigned long fast_segs,
786  			      struct iovec *fast_pointer,
787  			      struct iovec **ret_pointer)
788  {
789  	unsigned long seg;
790  	ssize_t ret;
791  	struct iovec *iov = fast_pointer;
792  
793  	/*
794  	 * SuS says "The readv() function *may* fail if the iovcnt argument
795  	 * was less than or equal to 0, or greater than {IOV_MAX}."  Linux has
796  	 * traditionally returned zero for zero segments, so...
797  	 */
798  	if (nr_segs == 0) {
799  		ret = 0;
800  		goto out;
801  	}
802  
803  	/*
804  	 * First get the "struct iovec" from user memory and
805  	 * verify all the pointers
806  	 */
807  	if (nr_segs > UIO_MAXIOV) {
808  		ret = -EINVAL;
809  		goto out;
810  	}
811  	if (nr_segs > fast_segs) {
812  		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
813  		if (iov == NULL) {
814  			ret = -ENOMEM;
815  			goto out;
816  		}
817  	}
818  	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
819  		ret = -EFAULT;
820  		goto out;
821  	}
822  
823  	/*
824  	 * According to the Single Unix Specification we should return EINVAL
825  	 * if an element length is < 0 when cast to ssize_t or if the
826  	 * total length would overflow the ssize_t return value of the
827  	 * system call.
828  	 *
829  	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
830  	 * overflow case.
831  	 */
832  	ret = 0;
833  	for (seg = 0; seg < nr_segs; seg++) {
834  		void __user *buf = iov[seg].iov_base;
835  		ssize_t len = (ssize_t)iov[seg].iov_len;
836  
837  		/* see if we're about to use an invalid len or if
838  		 * it's about to overflow ssize_t */
839  		if (len < 0) {
840  			ret = -EINVAL;
841  			goto out;
842  		}
843  		if (type >= 0
844  		    && unlikely(!access_ok(buf, len))) {
845  			ret = -EFAULT;
846  			goto out;
847  		}
848  		if (len > MAX_RW_COUNT - ret) {
849  			len = MAX_RW_COUNT - ret;
850  			iov[seg].iov_len = len;
851  		}
852  		ret += len;
853  	}
854  out:
855  	*ret_pointer = iov;
856  	return ret;
857  }
858  
859  #ifdef CONFIG_COMPAT
860  ssize_t compat_rw_copy_check_uvector(int type,
861  		const struct compat_iovec __user *uvector, unsigned long nr_segs,
862  		unsigned long fast_segs, struct iovec *fast_pointer,
863  		struct iovec **ret_pointer)
864  {
865  	compat_ssize_t tot_len;
866  	struct iovec *iov = *ret_pointer = fast_pointer;
867  	ssize_t ret = 0;
868  	int seg;
869  
870  	/*
871  	 * SuS says "The readv() function *may* fail if the iovcnt argument
872  	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
873   * was less than or equal to 0, or greater than {IOV_MAX}."  Linux has
874  	 */
875  	if (nr_segs == 0)
876  		goto out;
877  
878  	ret = -EINVAL;
879  	if (nr_segs > UIO_MAXIOV)
880  		goto out;
881  	if (nr_segs > fast_segs) {
882  		ret = -ENOMEM;
883  		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
884  		if (iov == NULL)
885  			goto out;
886  	}
887  	*ret_pointer = iov;
888  
889  	ret = -EFAULT;
890  	if (!access_ok(uvector, nr_segs*sizeof(*uvector)))
891  		goto out;
892  
893  	/*
894  	 * Single unix specification:
895   * We should return -EINVAL if an element length is negative or does not
896   * fit in an ssize_t.
897   *
898   * In Linux, the total length is limited to MAX_RW_COUNT, so there is
899   * no overflow possibility.
900  	 */
901  	tot_len = 0;
902  	ret = -EINVAL;
903  	for (seg = 0; seg < nr_segs; seg++) {
904  		compat_uptr_t buf;
905  		compat_ssize_t len;
906  
907  		if (__get_user(len, &uvector->iov_len) ||
908  		   __get_user(buf, &uvector->iov_base)) {
909  			ret = -EFAULT;
910  			goto out;
911  		}
912  		if (len < 0)	/* size_t not fitting in compat_ssize_t .. */
913  			goto out;
914  		if (type >= 0 &&
915  		    !access_ok(compat_ptr(buf), len)) {
916  			ret = -EFAULT;
917  			goto out;
918  		}
919  		if (len > MAX_RW_COUNT - tot_len)
920  			len = MAX_RW_COUNT - tot_len;
921  		tot_len += len;
922  		iov->iov_base = compat_ptr(buf);
923  		iov->iov_len = (compat_size_t) len;
924  		uvector++;
925  		iov++;
926  	}
927  	ret = tot_len;
928  
929  out:
930  	return ret;
931  }
932  #endif
933  
934  static ssize_t do_iter_read(struct file *file, struct iov_iter *iter,
935  		loff_t *pos, rwf_t flags)
936  {
937  	size_t tot_len;
938  	ssize_t ret = 0;
939  
940  	if (!(file->f_mode & FMODE_READ))
941  		return -EBADF;
942  	if (!(file->f_mode & FMODE_CAN_READ))
943  		return -EINVAL;
944  
945  	tot_len = iov_iter_count(iter);
946  	if (!tot_len)
947  		goto out;
948  	ret = rw_verify_area(READ, file, pos, tot_len);
949  	if (ret < 0)
950  		return ret;
951  
952  	if (file->f_op->read_iter)
953  		ret = do_iter_readv_writev(file, iter, pos, READ, flags);
954  	else
955  		ret = do_loop_readv_writev(file, iter, pos, READ, flags);
956  out:
957  	if (ret >= 0)
958  		fsnotify_access(file);
959  	return ret;
960  }
961  
962  ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
963  			   struct iov_iter *iter)
964  {
965  	size_t tot_len;
966  	ssize_t ret = 0;
967  
968  	if (!file->f_op->read_iter)
969  		return -EINVAL;
970  	if (!(file->f_mode & FMODE_READ))
971  		return -EBADF;
972  	if (!(file->f_mode & FMODE_CAN_READ))
973  		return -EINVAL;
974  
975  	tot_len = iov_iter_count(iter);
976  	if (!tot_len)
977  		goto out;
978  	ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
979  	if (ret < 0)
980  		return ret;
981  
982  	ret = call_read_iter(file, iocb, iter);
983  out:
984  	if (ret >= 0)
985  		fsnotify_access(file);
986  	return ret;
987  }
988  EXPORT_SYMBOL(vfs_iocb_iter_read);
989  
990  ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
991  		rwf_t flags)
992  {
993  	if (!file->f_op->read_iter)
994  		return -EINVAL;
995  	return do_iter_read(file, iter, ppos, flags);
996  }
997  EXPORT_SYMBOL(vfs_iter_read);
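
/*
 * Illustrative sketch (editorial, hdr/body and their lengths are
 * hypothetical): in-kernel vectored I/O builds an iov_iter over kvecs
 * rather than user iovecs, e.g.
 *
 *	struct kvec vec[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	struct iov_iter iter;
 *	loff_t pos = 0;
 *
 *	iov_iter_kvec(&iter, READ, vec, 2, hdr_len + body_len);
 *	ret = vfs_iter_read(file, &iter, &pos, 0);
 *
 * and shares the ->read_iter path with the syscall code above.
 */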
998  
999  static ssize_t do_iter_write(struct file *file, struct iov_iter *iter,
1000  		loff_t *pos, rwf_t flags)
1001  {
1002  	size_t tot_len;
1003  	ssize_t ret = 0;
1004  
1005  	if (!(file->f_mode & FMODE_WRITE))
1006  		return -EBADF;
1007  	if (!(file->f_mode & FMODE_CAN_WRITE))
1008  		return -EINVAL;
1009  
1010  	tot_len = iov_iter_count(iter);
1011  	if (!tot_len)
1012  		return 0;
1013  	ret = rw_verify_area(WRITE, file, pos, tot_len);
1014  	if (ret < 0)
1015  		return ret;
1016  
1017  	if (file->f_op->write_iter)
1018  		ret = do_iter_readv_writev(file, iter, pos, WRITE, flags);
1019  	else
1020  		ret = do_loop_readv_writev(file, iter, pos, WRITE, flags);
1021  	if (ret > 0)
1022  		fsnotify_modify(file);
1023  	return ret;
1024  }
1025  
1026  ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
1027  			    struct iov_iter *iter)
1028  {
1029  	size_t tot_len;
1030  	ssize_t ret = 0;
1031  
1032  	if (!file->f_op->write_iter)
1033  		return -EINVAL;
1034  	if (!(file->f_mode & FMODE_WRITE))
1035  		return -EBADF;
1036  	if (!(file->f_mode & FMODE_CAN_WRITE))
1037  		return -EINVAL;
1038  
1039  	tot_len = iov_iter_count(iter);
1040  	if (!tot_len)
1041  		return 0;
1042  	ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);
1043  	if (ret < 0)
1044  		return ret;
1045  
1046  	ret = call_write_iter(file, iocb, iter);
1047  	if (ret > 0)
1048  		fsnotify_modify(file);
1049  
1050  	return ret;
1051  }
1052  EXPORT_SYMBOL(vfs_iocb_iter_write);
1053  
1054  ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
1055  		rwf_t flags)
1056  {
1057  	if (!file->f_op->write_iter)
1058  		return -EINVAL;
1059  	return do_iter_write(file, iter, ppos, flags);
1060  }
1061  EXPORT_SYMBOL(vfs_iter_write);
1062  
1063  ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
1064  		  unsigned long vlen, loff_t *pos, rwf_t flags)
1065  {
1066  	struct iovec iovstack[UIO_FASTIOV];
1067  	struct iovec *iov = iovstack;
1068  	struct iov_iter iter;
1069  	ssize_t ret;
1070  
1071  	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
1072  	if (ret >= 0) {
1073  		ret = do_iter_read(file, &iter, pos, flags);
1074  		kfree(iov);
1075  	}
1076  
1077  	return ret;
1078  }
1079  
1080  static ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
1081  		   unsigned long vlen, loff_t *pos, rwf_t flags)
1082  {
1083  	struct iovec iovstack[UIO_FASTIOV];
1084  	struct iovec *iov = iovstack;
1085  	struct iov_iter iter;
1086  	ssize_t ret;
1087  
1088  	ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
1089  	if (ret >= 0) {
1090  		file_start_write(file);
1091  		ret = do_iter_write(file, &iter, pos, flags);
1092  		file_end_write(file);
1093  		kfree(iov);
1094  	}
1095  	return ret;
1096  }
1097  
1098  static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec,
1099  			unsigned long vlen, rwf_t flags)
1100  {
1101  	struct fd f = fdget_pos(fd);
1102  	ssize_t ret = -EBADF;
1103  
1104  	if (f.file) {
1105  		loff_t pos, *ppos = file_ppos(f.file);
1106  		if (ppos) {
1107  			pos = *ppos;
1108  			ppos = &pos;
1109  		}
1110  		ret = vfs_readv(f.file, vec, vlen, ppos, flags);
1111  		if (ret >= 0 && ppos)
1112  			f.file->f_pos = pos;
1113  		fdput_pos(f);
1114  	}
1115  
1116  	if (ret > 0)
1117  		add_rchar(current, ret);
1118  	inc_syscr(current);
1119  	return ret;
1120  }
1121  
1122  static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
1123  			 unsigned long vlen, rwf_t flags)
1124  {
1125  	struct fd f = fdget_pos(fd);
1126  	ssize_t ret = -EBADF;
1127  
1128  	if (f.file) {
1129  		loff_t pos, *ppos = file_ppos(f.file);
1130  		if (ppos) {
1131  			pos = *ppos;
1132  			ppos = &pos;
1133  		}
1134  		ret = vfs_writev(f.file, vec, vlen, ppos, flags);
1135  		if (ret >= 0 && ppos)
1136  			f.file->f_pos = pos;
1137  		fdput_pos(f);
1138  	}
1139  
1140  	if (ret > 0)
1141  		add_wchar(current, ret);
1142  	inc_syscw(current);
1143  	return ret;
1144  }
1145  
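/*
 * Editorial note: the double shift in pos_from_hilo() below avoids
 * shifting by BITS_PER_LONG (undefined behaviour) on 64-bit kernels,
 * where "low" already carries the whole offset and "high" must
 * contribute nothing; on 32-bit kernels it composes the two halves
 * into a 64-bit loff_t as expected.
 */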
1146  static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
1147  {
1148  #define HALF_LONG_BITS (BITS_PER_LONG / 2)
1149  	return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
1150  }
1151  
1152  static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec,
1153  			 unsigned long vlen, loff_t pos, rwf_t flags)
1154  {
1155  	struct fd f;
1156  	ssize_t ret = -EBADF;
1157  
1158  	if (pos < 0)
1159  		return -EINVAL;
1160  
1161  	f = fdget(fd);
1162  	if (f.file) {
1163  		ret = -ESPIPE;
1164  		if (f.file->f_mode & FMODE_PREAD)
1165  			ret = vfs_readv(f.file, vec, vlen, &pos, flags);
1166  		fdput(f);
1167  	}
1168  
1169  	if (ret > 0)
1170  		add_rchar(current, ret);
1171  	inc_syscr(current);
1172  	return ret;
1173  }
1174  
1175  static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
1176  			  unsigned long vlen, loff_t pos, rwf_t flags)
1177  {
1178  	struct fd f;
1179  	ssize_t ret = -EBADF;
1180  
1181  	if (pos < 0)
1182  		return -EINVAL;
1183  
1184  	f = fdget(fd);
1185  	if (f.file) {
1186  		ret = -ESPIPE;
1187  		if (f.file->f_mode & FMODE_PWRITE)
1188  			ret = vfs_writev(f.file, vec, vlen, &pos, flags);
1189  		fdput(f);
1190  	}
1191  
1192  	if (ret > 0)
1193  		add_wchar(current, ret);
1194  	inc_syscw(current);
1195  	return ret;
1196  }
1197  
1198  SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
1199  		unsigned long, vlen)
1200  {
1201  	return do_readv(fd, vec, vlen, 0);
1202  }
1203  
1204  SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
1205  		unsigned long, vlen)
1206  {
1207  	return do_writev(fd, vec, vlen, 0);
1208  }
1209  
1210  SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
1211  		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
1212  {
1213  	loff_t pos = pos_from_hilo(pos_h, pos_l);
1214  
1215  	return do_preadv(fd, vec, vlen, pos, 0);
1216  }
1217  
1218  SYSCALL_DEFINE6(preadv2, unsigned long, fd, const struct iovec __user *, vec,
1219  		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
1220  		rwf_t, flags)
1221  {
1222  	loff_t pos = pos_from_hilo(pos_h, pos_l);
1223  
1224  	if (pos == -1)
1225  		return do_readv(fd, vec, vlen, flags);
1226  
1227  	return do_preadv(fd, vec, vlen, pos, flags);
1228  }
1229  
1230  SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
1231  		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
1232  {
1233  	loff_t pos = pos_from_hilo(pos_h, pos_l);
1234  
1235  	return do_pwritev(fd, vec, vlen, pos, 0);
1236  }
1237  
1238  SYSCALL_DEFINE6(pwritev2, unsigned long, fd, const struct iovec __user *, vec,
1239  		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
1240  		rwf_t, flags)
1241  {
1242  	loff_t pos = pos_from_hilo(pos_h, pos_l);
1243  
1244  	if (pos == -1)
1245  		return do_writev(fd, vec, vlen, flags);
1246  
1247  	return do_pwritev(fd, vec, vlen, pos, flags);
1248  }
1249  
1250  #ifdef CONFIG_COMPAT
1251  static size_t compat_readv(struct file *file,
1252  			   const struct compat_iovec __user *vec,
1253  			   unsigned long vlen, loff_t *pos, rwf_t flags)
1254  {
1255  	struct iovec iovstack[UIO_FASTIOV];
1256  	struct iovec *iov = iovstack;
1257  	struct iov_iter iter;
1258  	ssize_t ret;
1259  
1260  	ret = compat_import_iovec(READ, vec, vlen, UIO_FASTIOV, &iov, &iter);
1261  	if (ret >= 0) {
1262  		ret = do_iter_read(file, &iter, pos, flags);
1263  		kfree(iov);
1264  	}
1265  	if (ret > 0)
1266  		add_rchar(current, ret);
1267  	inc_syscr(current);
1268  	return ret;
1269  }
1270  
1271  static size_t do_compat_readv(compat_ulong_t fd,
1272  				 const struct compat_iovec __user *vec,
1273  				 compat_ulong_t vlen, rwf_t flags)
1274  {
1275  	struct fd f = fdget_pos(fd);
1276  	ssize_t ret;
1277  	loff_t pos;
1278  
1279  	if (!f.file)
1280  		return -EBADF;
1281  	pos = f.file->f_pos;
1282  	ret = compat_readv(f.file, vec, vlen, &pos, flags);
1283  	if (ret >= 0)
1284  		f.file->f_pos = pos;
1285  	fdput_pos(f);
1286  	return ret;
1287  
1288  }
1289  
1290  COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
1291  		const struct compat_iovec __user *,vec,
1292  		compat_ulong_t, vlen)
1293  {
1294  	return do_compat_readv(fd, vec, vlen, 0);
1295  }
1296  
1297  static long do_compat_preadv64(unsigned long fd,
1298  				  const struct compat_iovec __user *vec,
1299  				  unsigned long vlen, loff_t pos, rwf_t flags)
1300  {
1301  	struct fd f;
1302  	ssize_t ret;
1303  
1304  	if (pos < 0)
1305  		return -EINVAL;
1306  	f = fdget(fd);
1307  	if (!f.file)
1308  		return -EBADF;
1309  	ret = -ESPIPE;
1310  	if (f.file->f_mode & FMODE_PREAD)
1311  		ret = compat_readv(f.file, vec, vlen, &pos, flags);
1312  	fdput(f);
1313  	return ret;
1314  }
1315  
1316  #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
1317  COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
1318  		const struct compat_iovec __user *,vec,
1319  		unsigned long, vlen, loff_t, pos)
1320  {
1321  	return do_compat_preadv64(fd, vec, vlen, pos, 0);
1322  }
1323  #endif
1324  
1325  COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
1326  		const struct compat_iovec __user *,vec,
1327  		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
1328  {
1329  	loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1330  
1331  	return do_compat_preadv64(fd, vec, vlen, pos, 0);
1332  }
1333  
1334  #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
1335  COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
1336  		const struct compat_iovec __user *,vec,
1337  		unsigned long, vlen, loff_t, pos, rwf_t, flags)
1338  {
1339  	if (pos == -1)
1340  		return do_compat_readv(fd, vec, vlen, flags);
1341  
1342  	return do_compat_preadv64(fd, vec, vlen, pos, flags);
1343  }
1344  #endif
1345  
1346  COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
1347  		const struct compat_iovec __user *,vec,
1348  		compat_ulong_t, vlen, u32, pos_low, u32, pos_high,
1349  		rwf_t, flags)
1350  {
1351  	loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1352  
1353  	if (pos == -1)
1354  		return do_compat_readv(fd, vec, vlen, flags);
1355  
1356  	return do_compat_preadv64(fd, vec, vlen, pos, flags);
1357  }
1358  
1359  static size_t compat_writev(struct file *file,
1360  			    const struct compat_iovec __user *vec,
1361  			    unsigned long vlen, loff_t *pos, rwf_t flags)
1362  {
1363  	struct iovec iovstack[UIO_FASTIOV];
1364  	struct iovec *iov = iovstack;
1365  	struct iov_iter iter;
1366  	ssize_t ret;
1367  
1368  	ret = compat_import_iovec(WRITE, vec, vlen, UIO_FASTIOV, &iov, &iter);
1369  	if (ret >= 0) {
1370  		file_start_write(file);
1371  		ret = do_iter_write(file, &iter, pos, flags);
1372  		file_end_write(file);
1373  		kfree(iov);
1374  	}
1375  	if (ret > 0)
1376  		add_wchar(current, ret);
1377  	inc_syscw(current);
1378  	return ret;
1379  }
1380  
1381  static size_t do_compat_writev(compat_ulong_t fd,
1382  				  const struct compat_iovec __user* vec,
1383  				  compat_ulong_t vlen, rwf_t flags)
1384  {
1385  	struct fd f = fdget_pos(fd);
1386  	ssize_t ret;
1387  	loff_t pos;
1388  
1389  	if (!f.file)
1390  		return -EBADF;
1391  	pos = f.file->f_pos;
1392  	ret = compat_writev(f.file, vec, vlen, &pos, flags);
1393  	if (ret >= 0)
1394  		f.file->f_pos = pos;
1395  	fdput_pos(f);
1396  	return ret;
1397  }
1398  
1399  COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
1400  		const struct compat_iovec __user *, vec,
1401  		compat_ulong_t, vlen)
1402  {
1403  	return do_compat_writev(fd, vec, vlen, 0);
1404  }
1405  
1406  static long do_compat_pwritev64(unsigned long fd,
1407  				   const struct compat_iovec __user *vec,
1408  				   unsigned long vlen, loff_t pos, rwf_t flags)
1409  {
1410  	struct fd f;
1411  	ssize_t ret;
1412  
1413  	if (pos < 0)
1414  		return -EINVAL;
1415  	f = fdget(fd);
1416  	if (!f.file)
1417  		return -EBADF;
1418  	ret = -ESPIPE;
1419  	if (f.file->f_mode & FMODE_PWRITE)
1420  		ret = compat_writev(f.file, vec, vlen, &pos, flags);
1421  	fdput(f);
1422  	return ret;
1423  }
1424  
1425  #ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
1426  COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
1427  		const struct compat_iovec __user *,vec,
1428  		unsigned long, vlen, loff_t, pos)
1429  {
1430  	return do_compat_pwritev64(fd, vec, vlen, pos, 0);
1431  }
1432  #endif
1433  
1434  COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
1435  		const struct compat_iovec __user *,vec,
1436  		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
1437  {
1438  	loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1439  
1440  	return do_compat_pwritev64(fd, vec, vlen, pos, 0);
1441  }
1442  
1443  #ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
1444  COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
1445  		const struct compat_iovec __user *,vec,
1446  		unsigned long, vlen, loff_t, pos, rwf_t, flags)
1447  {
1448  	if (pos == -1)
1449  		return do_compat_writev(fd, vec, vlen, flags);
1450  
1451  	return do_compat_pwritev64(fd, vec, vlen, pos, flags);
1452  }
1453  #endif
1454  
1455  COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
1456  		const struct compat_iovec __user *,vec,
1457  		compat_ulong_t, vlen, u32, pos_low, u32, pos_high, rwf_t, flags)
1458  {
1459  	loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1460  
1461  	if (pos == -1)
1462  		return do_compat_writev(fd, vec, vlen, flags);
1463  
1464  	return do_compat_pwritev64(fd, vec, vlen, pos, flags);
1465  }
1466  
1467  #endif
1468  
1469  static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
1470  			   size_t count, loff_t max)
1471  {
1472  	struct fd in, out;
1473  	struct inode *in_inode, *out_inode;
1474  	loff_t pos;
1475  	loff_t out_pos;
1476  	ssize_t retval;
1477  	int fl;
1478  
1479  	/*
1480  	 * Get input file, and verify that it is ok.
1481  	 */
1482  	retval = -EBADF;
1483  	in = fdget(in_fd);
1484  	if (!in.file)
1485  		goto out;
1486  	if (!(in.file->f_mode & FMODE_READ))
1487  		goto fput_in;
1488  	retval = -ESPIPE;
1489  	if (!ppos) {
1490  		pos = in.file->f_pos;
1491  	} else {
1492  		pos = *ppos;
1493  		if (!(in.file->f_mode & FMODE_PREAD))
1494  			goto fput_in;
1495  	}
1496  	retval = rw_verify_area(READ, in.file, &pos, count);
1497  	if (retval < 0)
1498  		goto fput_in;
1499  	if (count > MAX_RW_COUNT)
1500  		count =  MAX_RW_COUNT;
1501  
1502  	/*
1503  	 * Get output file, and verify that it is ok.
1504  	 */
1505  	retval = -EBADF;
1506  	out = fdget(out_fd);
1507  	if (!out.file)
1508  		goto fput_in;
1509  	if (!(out.file->f_mode & FMODE_WRITE))
1510  		goto fput_out;
1511  	in_inode = file_inode(in.file);
1512  	out_inode = file_inode(out.file);
1513  	out_pos = out.file->f_pos;
1514  	retval = rw_verify_area(WRITE, out.file, &out_pos, count);
1515  	if (retval < 0)
1516  		goto fput_out;
1517  
1518  	if (!max)
1519  		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
1520  
1521  	if (unlikely(pos + count > max)) {
1522  		retval = -EOVERFLOW;
1523  		if (pos >= max)
1524  			goto fput_out;
1525  		count = max - pos;
1526  	}
1527  
1528  	fl = 0;
1529  #if 0
1530  	/*
1531  	 * We need to debate whether we can enable this or not. The
1532  	 * man page documents EAGAIN return for the output at least,
1533  	 * and the application is arguably buggy if it doesn't expect
1534  	 * EAGAIN on a non-blocking file descriptor.
1535  	 */
1536  	if (in.file->f_flags & O_NONBLOCK)
1537  		fl = SPLICE_F_NONBLOCK;
1538  #endif
1539  	file_start_write(out.file);
1540  	retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
1541  	file_end_write(out.file);
1542  
1543  	if (retval > 0) {
1544  		add_rchar(current, retval);
1545  		add_wchar(current, retval);
1546  		fsnotify_access(in.file);
1547  		fsnotify_modify(out.file);
1548  		out.file->f_pos = out_pos;
1549  		if (ppos)
1550  			*ppos = pos;
1551  		else
1552  			in.file->f_pos = pos;
1553  	}
1554  
1555  	inc_syscr(current);
1556  	inc_syscw(current);
1557  	if (pos > max)
1558  		retval = -EOVERFLOW;
1559  
1560  fput_out:
1561  	fdput(out);
1562  fput_in:
1563  	fdput(in);
1564  out:
1565  	return retval;
1566  }
1567  
1568  SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
1569  {
1570  	loff_t pos;
1571  	off_t off;
1572  	ssize_t ret;
1573  
1574  	if (offset) {
1575  		if (unlikely(get_user(off, offset)))
1576  			return -EFAULT;
1577  		pos = off;
1578  		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
1579  		if (unlikely(put_user(pos, offset)))
1580  			return -EFAULT;
1581  		return ret;
1582  	}
1583  
1584  	return do_sendfile(out_fd, in_fd, NULL, count, 0);
1585  }
1586  
1587  SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
1588  {
1589  	loff_t pos;
1590  	ssize_t ret;
1591  
1592  	if (offset) {
1593  		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
1594  			return -EFAULT;
1595  		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
1596  		if (unlikely(put_user(pos, offset)))
1597  			return -EFAULT;
1598  		return ret;
1599  	}
1600  
1601  	return do_sendfile(out_fd, in_fd, NULL, count, 0);
1602  }
1603  
1604  #ifdef CONFIG_COMPAT
1605  COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
1606  		compat_off_t __user *, offset, compat_size_t, count)
1607  {
1608  	loff_t pos;
1609  	off_t off;
1610  	ssize_t ret;
1611  
1612  	if (offset) {
1613  		if (unlikely(get_user(off, offset)))
1614  			return -EFAULT;
1615  		pos = off;
1616  		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
1617  		if (unlikely(put_user(pos, offset)))
1618  			return -EFAULT;
1619  		return ret;
1620  	}
1621  
1622  	return do_sendfile(out_fd, in_fd, NULL, count, 0);
1623  }
1624  
1625  COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
1626  		compat_loff_t __user *, offset, compat_size_t, count)
1627  {
1628  	loff_t pos;
1629  	ssize_t ret;
1630  
1631  	if (offset) {
1632  		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
1633  			return -EFAULT;
1634  		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
1635  		if (unlikely(put_user(pos, offset)))
1636  			return -EFAULT;
1637  		return ret;
1638  	}
1639  
1640  	return do_sendfile(out_fd, in_fd, NULL, count, 0);
1641  }
1642  #endif
1643  
1644  /**
1645   * generic_copy_file_range - copy data between two files
1646   * @file_in:	file structure to read from
1647   * @pos_in:	file offset to read from
1648   * @file_out:	file structure to write data to
1649   * @pos_out:	file offset to write data to
1650   * @len:	amount of data to copy
1651   * @flags:	copy flags
1652   *
1653   * This is a generic filesystem helper to copy data from one file to another.
1654   * It has no constraints on the source or destination file owners - the files
1655   * can belong to different superblocks and different filesystem types. Short
1656   * copies are allowed.
1657   *
1658   * This should be called from the @file_out filesystem, as per the
1659   * ->copy_file_range() method.
1660   *
1661   * Returns the number of bytes copied or a negative error indicating the
1662   * failure.
1663   */
1664  
1665  ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
1666  				struct file *file_out, loff_t pos_out,
1667  				size_t len, unsigned int flags)
1668  {
1669  	return do_splice_direct(file_in, &pos_in, file_out, &pos_out,
1670  				len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);
1671  }
1672  EXPORT_SYMBOL(generic_copy_file_range);
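
/*
 * Illustrative sketch (editorial): a filesystem with no smarter
 * offload can simply forward its ->copy_file_range to
 * generic_copy_file_range(),
 *
 *	static ssize_t my_copy_file_range(struct file *in, loff_t pos_in,
 *					  struct file *out, loff_t pos_out,
 *					  size_t len, unsigned int flags)
 *	{
 *		return generic_copy_file_range(in, pos_in, out, pos_out,
 *					       len, flags);
 *	}
 *
 * which ends up doing a splice-based copy capped at MAX_RW_COUNT.
 */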
1673  
1674  static ssize_t do_copy_file_range(struct file *file_in, loff_t pos_in,
1675  				  struct file *file_out, loff_t pos_out,
1676  				  size_t len, unsigned int flags)
1677  {
1678  	/*
1679  	 * Although we now allow filesystems to handle cross sb copy, passing
1680  	 * a file of the wrong filesystem type to a filesystem driver can result
1681  	 * in an attempt to dereference the wrong type of ->private_data, so
1682  	 * avoid doing that until we really have a good reason.  NFS defines
1683  	 * several different file_system_type structures, but they all end up
1684  	 * using the same ->copy_file_range() function pointer.
1685  	 */
1686  	if (file_out->f_op->copy_file_range &&
1687  	    file_out->f_op->copy_file_range == file_in->f_op->copy_file_range)
1688  		return file_out->f_op->copy_file_range(file_in, pos_in,
1689  						       file_out, pos_out,
1690  						       len, flags);
1691  
1692  	return generic_copy_file_range(file_in, pos_in, file_out, pos_out, len,
1693  				       flags);
1694  }
1695  
1696  /*
1697   * copy_file_range() differs from regular file read and write in that it
1698   * specifically allows returning partial success.  When it does so is up
1699   * to the copy_file_range method.
1700   */
1701  ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
1702  			    struct file *file_out, loff_t pos_out,
1703  			    size_t len, unsigned int flags)
1704  {
1705  	ssize_t ret;
1706  
1707  	if (flags != 0)
1708  		return -EINVAL;
1709  
1710  	ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len,
1711  				       flags);
1712  	if (unlikely(ret))
1713  		return ret;
1714  
1715  	ret = rw_verify_area(READ, file_in, &pos_in, len);
1716  	if (unlikely(ret))
1717  		return ret;
1718  
1719  	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
1720  	if (unlikely(ret))
1721  		return ret;
1722  
1723  	if (len == 0)
1724  		return 0;
1725  
1726  	file_start_write(file_out);
1727  
1728  	/*
1729  	 * Try cloning first; this is supported by more file systems and is
1730  	 * more efficient if both clone and copy are supported (e.g. NFS).
1731  	 */
1732  	if (file_in->f_op->remap_file_range &&
1733  	    file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) {
1734  		loff_t cloned;
1735  
1736  		cloned = file_in->f_op->remap_file_range(file_in, pos_in,
1737  				file_out, pos_out,
1738  				min_t(loff_t, MAX_RW_COUNT, len),
1739  				REMAP_FILE_CAN_SHORTEN);
1740  		if (cloned > 0) {
1741  			ret = cloned;
1742  			goto done;
1743  		}
1744  	}
1745  
1746  	ret = do_copy_file_range(file_in, pos_in, file_out, pos_out, len,
1747  				flags);
1748  	WARN_ON_ONCE(ret == -EOPNOTSUPP);
1749  done:
1750  	if (ret > 0) {
1751  		fsnotify_access(file_in);
1752  		add_rchar(current, ret);
1753  		fsnotify_modify(file_out);
1754  		add_wchar(current, ret);
1755  	}
1756  
1757  	inc_syscr(current);
1758  	inc_syscw(current);
1759  
1760  	file_end_write(file_out);
1761  
1762  	return ret;
1763  }
1764  EXPORT_SYMBOL(vfs_copy_file_range);
1765  
1766  SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
1767  		int, fd_out, loff_t __user *, off_out,
1768  		size_t, len, unsigned int, flags)
1769  {
1770  	loff_t pos_in;
1771  	loff_t pos_out;
1772  	struct fd f_in;
1773  	struct fd f_out;
1774  	ssize_t ret = -EBADF;
1775  
1776  	f_in = fdget(fd_in);
1777  	if (!f_in.file)
1778  		goto out2;
1779  
1780  	f_out = fdget(fd_out);
1781  	if (!f_out.file)
1782  		goto out1;
1783  
1784  	ret = -EFAULT;
1785  	if (off_in) {
1786  		if (copy_from_user(&pos_in, off_in, sizeof(loff_t)))
1787  			goto out;
1788  	} else {
1789  		pos_in = f_in.file->f_pos;
1790  	}
1791  
1792  	if (off_out) {
1793  		if (copy_from_user(&pos_out, off_out, sizeof(loff_t)))
1794  			goto out;
1795  	} else {
1796  		pos_out = f_out.file->f_pos;
1797  	}
1798  
1799  	ret = vfs_copy_file_range(f_in.file, pos_in, f_out.file, pos_out, len,
1800  				  flags);
1801  	if (ret > 0) {
1802  		pos_in += ret;
1803  		pos_out += ret;
1804  
1805  		if (off_in) {
1806  			if (copy_to_user(off_in, &pos_in, sizeof(loff_t)))
1807  				ret = -EFAULT;
1808  		} else {
1809  			f_in.file->f_pos = pos_in;
1810  		}
1811  
1812  		if (off_out) {
1813  			if (copy_to_user(off_out, &pos_out, sizeof(loff_t)))
1814  				ret = -EFAULT;
1815  		} else {
1816  			f_out.file->f_pos = pos_out;
1817  		}
1818  	}
1819  
1820  out:
1821  	fdput(f_out);
1822  out1:
1823  	fdput(f_in);
1824  out2:
1825  	return ret;
1826  }
1827  
1828  static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
1829  			     bool write)
1830  {
1831  	struct inode *inode = file_inode(file);
1832  
1833  	if (unlikely(pos < 0 || len < 0))
1834  		return -EINVAL;
1835  
1836  	if (unlikely((loff_t) (pos + len) < 0))
1837  		return -EINVAL;
1838  
1839  	if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
1840  		loff_t end = len ? pos + len - 1 : OFFSET_MAX;
1841  		int retval;
1842  
1843  		retval = locks_mandatory_area(inode, file, pos, end,
1844  				write ? F_WRLCK : F_RDLCK);
1845  		if (retval < 0)
1846  			return retval;
1847  	}
1848  
1849  	return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
1850  }
1851  /*
1852   * Ensure that we don't remap a partial EOF block in the middle of something
1853   * else.  Assume that the offsets have already been checked for block
1854   * alignment.
1855   *
1856   * For clone we only link a partial EOF block above or at the destination file's
1857   * EOF.  For deduplication we accept a partial EOF block only if it ends at the
1858   * destination file's EOF (can not link it into the middle of a file).
1859   *
1860   * Shorten the request if possible.
1861   */
1862  static int generic_remap_check_len(struct inode *inode_in,
1863  				   struct inode *inode_out,
1864  				   loff_t pos_out,
1865  				   loff_t *len,
1866  				   unsigned int remap_flags)
1867  {
1868  	u64 blkmask = i_blocksize(inode_in) - 1;
1869  	loff_t new_len = *len;
1870  
1871  	if ((*len & blkmask) == 0)
1872  		return 0;
1873  
1874  	if (pos_out + *len < i_size_read(inode_out))
1875  		new_len &= ~blkmask;
1876  
1877  	if (new_len == *len)
1878  		return 0;
1879  
1880  	if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
1881  		*len = new_len;
1882  		return 0;
1883  	}
1884  
1885  	return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
1886  }
1887  
1888  /* Read a page's worth of file data into the page cache. */
1889  static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1890  {
1891  	struct page *page;
1892  
1893  	page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
1894  	if (IS_ERR(page))
1895  		return page;
1896  	if (!PageUptodate(page)) {
1897  		put_page(page);
1898  		return ERR_PTR(-EIO);
1899  	}
1900  	return page;
1901  }
1902  
1903  /*
1904   * Lock two pages, ensuring that we lock in offset order if the pages are from
1905   * the same file.
1906   */
1907  static void vfs_lock_two_pages(struct page *page1, struct page *page2)
1908  {
1909  	/* Always lock in order of increasing index. */
1910  	if (page1->index > page2->index)
1911  		swap(page1, page2);
1912  
1913  	lock_page(page1);
1914  	if (page1 != page2)
1915  		lock_page(page2);
1916  }
1917  
1918  /* Unlock two pages, being careful not to unlock the same page twice. */
1919  static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
1920  {
1921  	unlock_page(page1);
1922  	if (page1 != page2)
1923  		unlock_page(page2);
1924  }
1925  
1926  /*
1927   * Compare extents of two files to see if they are the same.
1928   * Caller must have locked both inodes to prevent write races.
1929   */
1930  static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1931  					 struct inode *dest, loff_t destoff,
1932  					 loff_t len, bool *is_same)
1933  {
1934  	loff_t src_poff;
1935  	loff_t dest_poff;
1936  	void *src_addr;
1937  	void *dest_addr;
1938  	struct page *src_page;
1939  	struct page *dest_page;
1940  	loff_t cmp_len;
1941  	bool same;
1942  	int error;
1943  
1944  	error = -EINVAL;
1945  	same = true;
1946  	while (len) {
1947  		src_poff = srcoff & (PAGE_SIZE - 1);
1948  		dest_poff = destoff & (PAGE_SIZE - 1);
1949  		cmp_len = min(PAGE_SIZE - src_poff,
1950  			      PAGE_SIZE - dest_poff);
1951  		cmp_len = min(cmp_len, len);
1952  		if (cmp_len <= 0)
1953  			goto out_error;
1954  
1955  		src_page = vfs_dedupe_get_page(src, srcoff);
1956  		if (IS_ERR(src_page)) {
1957  			error = PTR_ERR(src_page);
1958  			goto out_error;
1959  		}
1960  		dest_page = vfs_dedupe_get_page(dest, destoff);
1961  		if (IS_ERR(dest_page)) {
1962  			error = PTR_ERR(dest_page);
1963  			put_page(src_page);
1964  			goto out_error;
1965  		}
1966  
1967  		vfs_lock_two_pages(src_page, dest_page);
1968  
1969  		/*
1970  		 * Now that we've locked both pages, make sure they're still
1971  		 * mapped to the file data we're interested in.  If not,
1972  		 * someone is invalidating pages on us and we lose.
1973  		 */
1974  		if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
1975  		    src_page->mapping != src->i_mapping ||
1976  		    dest_page->mapping != dest->i_mapping) {
1977  			same = false;
1978  			goto unlock;
1979  		}
1980  
1981  		src_addr = kmap_atomic(src_page);
1982  		dest_addr = kmap_atomic(dest_page);
1983  
1984  		flush_dcache_page(src_page);
1985  		flush_dcache_page(dest_page);
1986  
1987  		if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
1988  			same = false;
1989  
1990  		kunmap_atomic(dest_addr);
1991  		kunmap_atomic(src_addr);
1992  unlock:
1993  		vfs_unlock_two_pages(src_page, dest_page);
1994  		put_page(dest_page);
1995  		put_page(src_page);
1996  
1997  		if (!same)
1998  			break;
1999  
2000  		srcoff += cmp_len;
2001  		destoff += cmp_len;
2002  		len -= cmp_len;
2003  	}
2004  
2005  	*is_same = same;
2006  	return 0;
2007  
2008  out_error:
2009  	return error;
2010  }
2011  
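/*
 * Illustration of the chunking above (not part of the original source): with
 * PAGE_SIZE = 4096, srcoff = 1000 and destoff = 5000 give src_poff = 1000 and
 * dest_poff = 904, so the first pass compares
 * cmp_len = min(4096 - 1000, 4096 - 904, len) = min(3096, 3192, len) bytes.
 * Both offsets then advance by cmp_len and the loop repeats until the ranges
 * differ or len is exhausted.
 */
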
2012  /*
2013   * Check that the two inodes are eligible for cloning, the ranges make
2014   * sense, and then flush all dirty data.  Caller must ensure that the
2015   * inodes have been locked against any other modifications.
2016   *
2017   * If there's an error, then the usual negative error code is returned.
2018   * Otherwise returns 0 with *len set to the request length.
2019   */
2020  int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
2021  				  struct file *file_out, loff_t pos_out,
2022  				  loff_t *len, unsigned int remap_flags)
2023  {
2024  	struct inode *inode_in = file_inode(file_in);
2025  	struct inode *inode_out = file_inode(file_out);
2026  	bool same_inode = (inode_in == inode_out);
2027  	int ret;
2028  
2029  	/* Don't touch certain kinds of inodes */
2030  	if (IS_IMMUTABLE(inode_out))
2031  		return -EPERM;
2032  
2033  	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
2034  		return -ETXTBSY;
2035  
2036  	/* Don't reflink dirs, pipes, sockets... */
2037  	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
2038  		return -EISDIR;
2039  	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
2040  		return -EINVAL;
2041  
2042  	/* Zero-length dedupe exits immediately; zero-length reflink extends to EOF. */
2043  	if (*len == 0) {
2044  		loff_t isize = i_size_read(inode_in);
2045  
2046  		if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
2047  			return 0;
2048  		if (pos_in > isize)
2049  			return -EINVAL;
2050  		*len = isize - pos_in;
2051  		if (*len == 0)
2052  			return 0;
2053  	}
2054  
2055  	/* Check that we don't violate system file offset limits. */
2056  	ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
2057  			remap_flags);
2058  	if (ret)
2059  		return ret;
2060  
2061  	/* Wait for the completion of any pending IOs on both files */
2062  	inode_dio_wait(inode_in);
2063  	if (!same_inode)
2064  		inode_dio_wait(inode_out);
2065  
2066  	ret = filemap_write_and_wait_range(inode_in->i_mapping,
2067  			pos_in, pos_in + *len - 1);
2068  	if (ret)
2069  		return ret;
2070  
2071  	ret = filemap_write_and_wait_range(inode_out->i_mapping,
2072  			pos_out, pos_out + *len - 1);
2073  	if (ret)
2074  		return ret;
2075  
2076  	/*
2077  	 * Check that the extents are the same.
2078  	 */
2079  	if (remap_flags & REMAP_FILE_DEDUP) {
2080  		bool		is_same = false;
2081  
2082  		ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
2083  				inode_out, pos_out, *len, &is_same);
2084  		if (ret)
2085  			return ret;
2086  		if (!is_same)
2087  			return -EBADE;
2088  	}
2089  
2090  	ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
2091  			remap_flags);
2092  	if (ret)
2093  		return ret;
2094  
2095  	/* Dedupe doesn't alter contents; only a clone needs file_modified(). */
2096  	if (!(remap_flags & REMAP_FILE_DEDUP))
2097  		ret = file_modified(file_out);
2098  
2099  	return ret;
2100  }
2101  EXPORT_SYMBOL(generic_remap_file_range_prep);
2102  
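/*
 * Sketch (not part of the original source) of how a filesystem's
 * ->remap_file_range() implementation is typically expected to use the prep
 * helper above: take both inode locks, let generic_remap_file_range_prep()
 * validate and flush the ranges, then do the filesystem-specific remapping.
 * myfs_remap_blocks() below is a hypothetical helper.
 *
 *	loff_t myfs_remap_file_range(struct file *file_in, loff_t pos_in,
 *				     struct file *file_out, loff_t pos_out,
 *				     loff_t len, unsigned int remap_flags)
 *	{
 *		struct inode *inode_in = file_inode(file_in);
 *		struct inode *inode_out = file_inode(file_out);
 *		int ret;
 *
 *		if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
 *			return -EINVAL;
 *
 *		lock_two_nondirectories(inode_in, inode_out);
 *		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *						    pos_out, &len, remap_flags);
 *		if (ret < 0 || len == 0)
 *			goto out_unlock;
 *
 *		ret = myfs_remap_blocks(inode_in, pos_in, inode_out, pos_out,
 *					len, remap_flags);
 *	out_unlock:
 *		unlock_two_nondirectories(inode_in, inode_out);
 *		return ret < 0 ? ret : len;
 *	}
 */
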
2103  loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
2104  			   struct file *file_out, loff_t pos_out,
2105  			   loff_t len, unsigned int remap_flags)
2106  {
2107  	loff_t ret;
2108  
2109  	WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);
2110  
2111  	/*
2112  	 * FICLONE/FICLONERANGE ioctls enforce that src and dest files are on
2113  	 * the same mount. Practically, they only need to be on the same file
2114  	 * system.
2115  	 */
2116  	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
2117  		return -EXDEV;
2118  
2119  	ret = generic_file_rw_checks(file_in, file_out);
2120  	if (ret < 0)
2121  		return ret;
2122  
2123  	if (!file_in->f_op->remap_file_range)
2124  		return -EOPNOTSUPP;
2125  
2126  	ret = remap_verify_area(file_in, pos_in, len, false);
2127  	if (ret)
2128  		return ret;
2129  
2130  	ret = remap_verify_area(file_out, pos_out, len, true);
2131  	if (ret)
2132  		return ret;
2133  
2134  	ret = file_in->f_op->remap_file_range(file_in, pos_in,
2135  			file_out, pos_out, len, remap_flags);
2136  	if (ret < 0)
2137  		return ret;
2138  
2139  	fsnotify_access(file_in);
2140  	fsnotify_modify(file_out);
2141  	return ret;
2142  }
2143  EXPORT_SYMBOL(do_clone_file_range);
2144  
2145  loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
2146  			    struct file *file_out, loff_t pos_out,
2147  			    loff_t len, unsigned int remap_flags)
2148  {
2149  	loff_t ret;
2150  
2151  	file_start_write(file_out);
2152  	ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
2153  				  remap_flags);
2154  	file_end_write(file_out);
2155  
2156  	return ret;
2157  }
2158  EXPORT_SYMBOL(vfs_clone_file_range);
2159  
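/*
 * Sketch (not part of the original source) of the kind of caller this helper
 * serves, e.g. an FICLONERANGE-style ioctl handler: resolve the source
 * descriptor, then let vfs_clone_file_range() handle write freezing plus the
 * cross-device, permission and range checks.  Error handling is abbreviated
 * and example_clone_range() is a hypothetical name.
 *
 *	static long example_clone_range(struct file *dst_file, int srcfd,
 *					loff_t src_off, loff_t dst_off,
 *					loff_t len)
 *	{
 *		struct fd src = fdget(srcfd);
 *		loff_t cloned;
 *
 *		if (!src.file)
 *			return -EBADF;
 *
 *		cloned = vfs_clone_file_range(src.file, src_off, dst_file,
 *					      dst_off, len, 0);
 *		fdput(src);
 *		return cloned < 0 ? (long)cloned : 0;
 *	}
 */
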
2160  /* Check whether we are allowed to dedupe the destination file */
2161  static bool allow_file_dedupe(struct file *file)
2162  {
2163  	if (capable(CAP_SYS_ADMIN))
2164  		return true;
2165  	if (file->f_mode & FMODE_WRITE)
2166  		return true;
2167  	if (uid_eq(current_fsuid(), file_inode(file)->i_uid))
2168  		return true;
2169  	if (!inode_permission(file_inode(file), MAY_WRITE))
2170  		return true;
2171  	return false;
2172  }
2173  
2174  loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
2175  				 struct file *dst_file, loff_t dst_pos,
2176  				 loff_t len, unsigned int remap_flags)
2177  {
2178  	loff_t ret;
2179  
2180  	WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
2181  				     REMAP_FILE_CAN_SHORTEN));
2182  
2183  	ret = mnt_want_write_file(dst_file);
2184  	if (ret)
2185  		return ret;
2186  
2187  	ret = remap_verify_area(dst_file, dst_pos, len, true);
2188  	if (ret < 0)
2189  		goto out_drop_write;
2190  
2191  	ret = -EPERM;
2192  	if (!allow_file_dedupe(dst_file))
2193  		goto out_drop_write;
2194  
2195  	ret = -EXDEV;
2196  	if (src_file->f_path.mnt != dst_file->f_path.mnt)
2197  		goto out_drop_write;
2198  
2199  	ret = -EISDIR;
2200  	if (S_ISDIR(file_inode(dst_file)->i_mode))
2201  		goto out_drop_write;
2202  
2203  	ret = -EINVAL;
2204  	if (!dst_file->f_op->remap_file_range)
2205  		goto out_drop_write;
2206  
2207  	if (len == 0) {
2208  		ret = 0;
2209  		goto out_drop_write;
2210  	}
2211  
2212  	ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
2213  			dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
2214  out_drop_write:
2215  	mnt_drop_write_file(dst_file);
2216  
2217  	return ret;
2218  }
2219  EXPORT_SYMBOL(vfs_dedupe_file_range_one);
2220  
2221  int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
2222  {
2223  	struct file_dedupe_range_info *info;
2224  	struct inode *src = file_inode(file);
2225  	u64 off;
2226  	u64 len;
2227  	int i;
2228  	int ret;
2229  	u16 count = same->dest_count;
2230  	loff_t deduped;
2231  
2232  	if (!(file->f_mode & FMODE_READ))
2233  		return -EINVAL;
2234  
2235  	if (same->reserved1 || same->reserved2)
2236  		return -EINVAL;
2237  
2238  	off = same->src_offset;
2239  	len = same->src_length;
2240  
2241  	if (S_ISDIR(src->i_mode))
2242  		return -EISDIR;
2243  
2244  	if (!S_ISREG(src->i_mode))
2245  		return -EINVAL;
2246  
2247  	if (!file->f_op->remap_file_range)
2248  		return -EOPNOTSUPP;
2249  
2250  	ret = remap_verify_area(file, off, len, false);
2251  	if (ret < 0)
2252  		return ret;
2253  	ret = 0;
2254  
2255  	if (off + len > i_size_read(src))
2256  		return -EINVAL;
2257  
2258  	/* Arbitrary 1G limit on a single dedupe request; this can be raised. */
2259  	len = min_t(u64, len, 1 << 30);
2260  
2261  	/* pre-format output fields to sane values */
2262  	for (i = 0; i < count; i++) {
2263  		same->info[i].bytes_deduped = 0ULL;
2264  		same->info[i].status = FILE_DEDUPE_RANGE_SAME;
2265  	}
2266  
2267  	for (i = 0, info = same->info; i < count; i++, info++) {
2268  		struct fd dst_fd = fdget(info->dest_fd);
2269  		struct file *dst_file = dst_fd.file;
2270  
2271  		if (!dst_file) {
2272  			info->status = -EBADF;
2273  			goto next_loop;
2274  		}
2275  
2276  		if (info->reserved) {
2277  			info->status = -EINVAL;
2278  			goto next_fdput;
2279  		}
2280  
2281  		deduped = vfs_dedupe_file_range_one(file, off, dst_file,
2282  						    info->dest_offset, len,
2283  						    REMAP_FILE_CAN_SHORTEN);
2284  		if (deduped == -EBADE)
2285  			info->status = FILE_DEDUPE_RANGE_DIFFERS;
2286  		else if (deduped < 0)
2287  			info->status = deduped;
2288  		else
2289  			info->bytes_deduped = len;
2290  
2291  next_fdput:
2292  		fdput(dst_fd);
2293  next_loop:
2294  		if (fatal_signal_pending(current))
2295  			break;
2296  	}
2297  	return ret;
2298  }
2299  EXPORT_SYMBOL(vfs_dedupe_file_range);
2300
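/*
 * Userspace sketch (not part of the original source) of the FIDEDUPERANGE
 * request that ultimately reaches vfs_dedupe_file_range(): one source range
 * plus a variable-length array of destinations; the ioctl is issued on the
 * source descriptor.  dedupe_one() is a hypothetical name and error handling
 * is kept minimal.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <stdlib.h>
 *
 *	static int dedupe_one(int src_fd, __u64 src_off, __u64 len,
 *			      int dst_fd, __u64 dst_off)
 *	{
 *		struct file_dedupe_range *arg;
 *		int ret;
 *
 *		arg = calloc(1, sizeof(*arg) + sizeof(arg->info[0]));
 *		if (!arg)
 *			return -1;
 *		arg->src_offset = src_off;
 *		arg->src_length = len;
 *		arg->dest_count = 1;
 *		arg->info[0].dest_fd = dst_fd;
 *		arg->info[0].dest_offset = dst_off;
 *
 *		ret = ioctl(src_fd, FIDEDUPERANGE, arg);
 *		if (ret == 0 && arg->info[0].status < 0)
 *			ret = -1;	// per-destination error reported in status
 *		free(arg);
 *		return ret;
 *	}
 */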