xref: /openbmc/linux/fs/read_write.c (revision 840ef8b7cc584a23c4f9d05352f4dbaf8e56e5ab)
1 /*
2  *  linux/fs/read_write.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 #include <linux/slab.h>
8 #include <linux/stat.h>
9 #include <linux/fcntl.h>
10 #include <linux/file.h>
11 #include <linux/uio.h>
12 #include <linux/fsnotify.h>
13 #include <linux/security.h>
14 #include <linux/export.h>
15 #include <linux/syscalls.h>
16 #include <linux/pagemap.h>
17 #include <linux/splice.h>
18 #include <linux/compat.h>
19 #include "read_write.h"
20 
21 #include <asm/uaccess.h>
22 #include <asm/unistd.h>
23 
/*
 * Default file operations for read-only files: seeking, synchronous and
 * AIO reads, read-only mmap and splice.  No write methods are supplied,
 * so write paths fail their method checks.
 */
const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);
33 
/*
 * Nonzero iff @file was opened with FMODE_UNSIGNED_OFFSET, i.e. its
 * offsets are interpreted as unsigned and may look "negative" as loff_t.
 * The value is the raw mode bit -- use it only as a boolean.
 */
static inline int unsigned_offsets(struct file *file)
{
	return file->f_mode & FMODE_UNSIGNED_OFFSET;
}
38 
39 static loff_t lseek_execute(struct file *file, struct inode *inode,
40 		loff_t offset, loff_t maxsize)
41 {
42 	if (offset < 0 && !unsigned_offsets(file))
43 		return -EINVAL;
44 	if (offset > maxsize)
45 		return -EINVAL;
46 
47 	if (offset != file->f_pos) {
48 		file->f_pos = offset;
49 		file->f_version = 0;
50 	}
51 	return offset;
52 }
53 
/**
 * generic_file_llseek_size - generic llseek implementation for regular files
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @whence:	type of seek
 * @maxsize:	max size of this file in file system
 * @eof:	offset used for SEEK_END position
 *
 * This is a variant of generic_file_llseek that allows passing in a custom
 * maximum file size and a custom EOF position, for e.g. hashed directories
 *
 * Return: the new file position; -EINVAL for an out-of-range position;
 * -ENXIO when SEEK_DATA/SEEK_HOLE start at or beyond @eof.
 *
 * Synchronization:
 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
 * read/writes behave like SEEK_SET against seeks.
 */
loff_t
generic_file_llseek_size(struct file *file, loff_t offset, int whence,
		loff_t maxsize, loff_t eof)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	case SEEK_END:
		offset += eof;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0)
			return file->f_pos;
		/*
		 * f_lock protects against read/modify/write race with other
		 * SEEK_CURs. Note that parallel writes and reads behave
		 * like SEEK_SET.
		 */
		spin_lock(&file->f_lock);
		offset = lseek_execute(file, inode, file->f_pos + offset,
				       maxsize);
		spin_unlock(&file->f_lock);
		return offset;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as long as
		 * offset isn't at the end of the file then the offset is data.
		 */
		if (offset >= eof)
			return -ENXIO;
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so as long as
		 * offset isn't i_size or larger, return i_size.
		 */
		if (offset >= eof)
			return -ENXIO;
		offset = eof;
		break;
	}

	return lseek_execute(file, inode, offset, maxsize);
}
EXPORT_SYMBOL(generic_file_llseek_size);
121 
122 /**
123  * generic_file_llseek - generic llseek implementation for regular files
124  * @file:	file structure to seek on
125  * @offset:	file offset to seek to
126  * @whence:	type of seek
127  *
128  * This is a generic implemenation of ->llseek useable for all normal local
129  * filesystems.  It just updates the file offset to the value specified by
130  * @offset and @whence under i_mutex.
131  */
132 loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
133 {
134 	struct inode *inode = file->f_mapping->host;
135 
136 	return generic_file_llseek_size(file, offset, whence,
137 					inode->i_sb->s_maxbytes,
138 					i_size_read(inode));
139 }
140 EXPORT_SYMBOL(generic_file_llseek);
141 
/**
 * noop_llseek - No Operation Performed llseek implementation
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @whence:	type of seek
 *
 * This is an implementation of ->llseek useable for the rare special case when
 * userspace expects the seek to succeed but the (device) file is actually not
 * able to perform the seek. In this case you use noop_llseek() instead of
 * falling back to the default implementation of ->llseek.
 *
 * Return: the current file position, unchanged -- the seek "succeeds"
 * without doing anything.
 */
loff_t noop_llseek(struct file *file, loff_t offset, int whence)
{
	return file->f_pos;
}
EXPORT_SYMBOL(noop_llseek);
158 
/*
 * ->llseek for files that cannot seek at all (e.g. pipes): always fails
 * with -ESPIPE and never touches f_pos.
 */
loff_t no_llseek(struct file *file, loff_t offset, int whence)
{
	return -ESPIPE;
}
EXPORT_SYMBOL(no_llseek);
164 
/*
 * Default ->llseek implementation.  Unlike generic_file_llseek(), the
 * whole computation and the f_pos update run under i_mutex, so seeks on
 * the same file are fully serialized against each other.
 */
loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (whence) {
		case SEEK_END:
			offset += i_size_read(inode);
			break;
		case SEEK_CUR:
			/*
			 * lseek(fd, 0, SEEK_CUR) merely queries the position;
			 * return f_pos without rewriting it.
			 */
			if (offset == 0) {
				retval = file->f_pos;
				goto out;
			}
			offset += file->f_pos;
			break;
		case SEEK_DATA:
			/*
			 * In the generic case the entire file is data, so as
			 * long as offset isn't at the end of the file then the
			 * offset is data.
			 */
			if (offset >= inode->i_size) {
				retval = -ENXIO;
				goto out;
			}
			break;
		case SEEK_HOLE:
			/*
			 * There is a virtual hole at the end of the file, so
			 * as long as offset isn't i_size or larger, return
			 * i_size.
			 */
			if (offset >= inode->i_size) {
				retval = -ENXIO;
				goto out;
			}
			offset = inode->i_size;
			break;
	}
	retval = -EINVAL;
	/* Negative offsets are only valid on FMODE_UNSIGNED_OFFSET files. */
	if (offset >= 0 || unsigned_offsets(file)) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
EXPORT_SYMBOL(default_llseek);
219 
220 loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
221 {
222 	loff_t (*fn)(struct file *, loff_t, int);
223 
224 	fn = no_llseek;
225 	if (file->f_mode & FMODE_LSEEK) {
226 		if (file->f_op && file->f_op->llseek)
227 			fn = file->f_op->llseek;
228 	}
229 	return fn(file, offset, whence);
230 }
231 EXPORT_SYMBOL(vfs_llseek);
232 
/*
 * lseek(2): reposition the file offset.  The result is returned as an
 * off_t; if the 64-bit position does not round-trip through off_t (a
 * large-file seek from a 32-bit ABI), report -EOVERFLOW instead.
 */
SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
	off_t retval;
	struct fd f = fdget(fd);
	if (!f.file)
		return -EBADF;

	retval = -EINVAL;
	if (whence <= SEEK_MAX) {
		loff_t res = vfs_llseek(f.file, offset, whence);
		/* Round-trip through off_t to detect truncation. */
		retval = res;
		if (res != (loff_t)retval)
			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
	}
	fdput(f);
	return retval;
}
250 
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat lseek(2): compat_off_t widens to off_t, after which the
 * native implementation does all the work.
 */
COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
{
	return sys_lseek(fd, offset, whence);
}
#endif
257 
#ifdef __ARCH_WANT_SYS_LLSEEK
/*
 * _llseek(2) for ABIs without a 64-bit lseek: the target offset arrives
 * split into two unsigned longs and the full 64-bit result is stored
 * through @result, so 32-bit userspace can seek within large files.
 */
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
		unsigned long, offset_low, loff_t __user *, result,
		unsigned int, whence)
{
	int retval;
	struct fd f = fdget(fd);
	loff_t offset;

	if (!f.file)
		return -EBADF;

	retval = -EINVAL;
	if (whence > SEEK_MAX)
		goto out_putf;

	/* Reassemble the 64-bit offset from its two halves. */
	offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
			whence);

	/* A negative offset here is a -errno from vfs_llseek(). */
	retval = (int)offset;
	if (offset >= 0) {
		retval = -EFAULT;
		if (!copy_to_user(result, &offset, sizeof(offset)))
			retval = 0;
	}
out_putf:
	fdput(f);
	return retval;
}
#endif
288 
/*
 * rw_verify_area doesn't like huge counts. We limit
 * them to something that fits in "int" so that others
 * won't have to do range checks all the time.
 *
 * Returns the (possibly clamped, <= MAX_RW_COUNT) byte count on
 * success, or a negative errno: -EINVAL for a bad count or offset,
 * -EOVERFLOW when an unsigned-offset access would wrap, or whatever the
 * mandatory-locking / security hooks return.
 */
int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count)
{
	struct inode *inode;
	loff_t pos;
	int retval = -EINVAL;

	inode = file_inode(file);
	if (unlikely((ssize_t) count < 0))
		return retval;
	pos = *ppos;
	if (unlikely(pos < 0)) {
		/* Negative positions only on FMODE_UNSIGNED_OFFSET files... */
		if (!unsigned_offsets(file))
			return retval;
		/* ...and the range must not wrap the 64-bit offset space. */
		if (count >= -pos) /* both values are in 0..LLONG_MAX */
			return -EOVERFLOW;
	} else if (unlikely((loff_t) (pos + count) < 0)) {
		/* pos + count overflowed past LLONG_MAX. */
		if (!unsigned_offsets(file))
			return retval;
	}

	/* Mandatory locking, when the inode/mount enables it. */
	if (unlikely(inode->i_flock && mandatory_lock(inode))) {
		retval = locks_mandatory_area(
			read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
			inode, file, pos, count);
		if (retval < 0)
			return retval;
	}
	retval = security_file_permission(file,
				read_write == READ ? MAY_READ : MAY_WRITE);
	if (retval)
		return retval;
	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
}
327 
/*
 * Park the task until the kiocb is kicked for retry: sleep in
 * TASK_UNINTERRUPTIBLE unless the kick already arrived, in which case
 * just consume the kicked flag and return immediately.
 */
static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (!kiocbIsKicked(iocb))
		schedule();
	else
		kiocbClearKicked(iocb);
	/* Back to runnable either way. */
	__set_current_state(TASK_RUNNING);
}
337 
/*
 * Synchronous read built on top of ->aio_read: wrap the buffer in a
 * one-segment iovec plus a sync kiocb, drive ->aio_read until it stops
 * asking for a retry, and wait if the operation was queued.  On return
 * *ppos reflects the position the aio path advanced to.
 */
ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		/* -EIOCBRETRY: sleep until the iocb is kicked, then retry. */
		wait_on_retry_sync_kiocb(&kiocb);
	}

	/* Queued asynchronously: block until it completes. */
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_read);
363 
364 ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
365 {
366 	ssize_t ret;
367 
368 	if (!(file->f_mode & FMODE_READ))
369 		return -EBADF;
370 	if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
371 		return -EINVAL;
372 	if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
373 		return -EFAULT;
374 
375 	ret = rw_verify_area(READ, file, pos, count);
376 	if (ret >= 0) {
377 		count = ret;
378 		if (file->f_op->read)
379 			ret = file->f_op->read(file, buf, count, pos);
380 		else
381 			ret = do_sync_read(file, buf, count, pos);
382 		if (ret > 0) {
383 			fsnotify_access(file);
384 			add_rchar(current, ret);
385 		}
386 		inc_syscr(current);
387 	}
388 
389 	return ret;
390 }
391 
392 EXPORT_SYMBOL(vfs_read);
393 
/*
 * Synchronous write built on top of ->aio_write; mirror image of
 * do_sync_read().  On return *ppos reflects the position the aio path
 * advanced to.
 */
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	/* Cast away const: the iovec is shared with the read path. */
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		/* -EIOCBRETRY: sleep until the iocb is kicked, then retry. */
		wait_on_retry_sync_kiocb(&kiocb);
	}

	/* Queued asynchronously: block until it completes. */
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);
419 
420 ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
421 {
422 	ssize_t ret;
423 
424 	if (!(file->f_mode & FMODE_WRITE))
425 		return -EBADF;
426 	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
427 		return -EINVAL;
428 	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
429 		return -EFAULT;
430 
431 	ret = rw_verify_area(WRITE, file, pos, count);
432 	if (ret >= 0) {
433 		count = ret;
434 		if (file->f_op->write)
435 			ret = file->f_op->write(file, buf, count, pos);
436 		else
437 			ret = do_sync_write(file, buf, count, pos);
438 		if (ret > 0) {
439 			fsnotify_modify(file);
440 			add_wchar(current, ret);
441 		}
442 		inc_syscw(current);
443 	}
444 
445 	return ret;
446 }
447 
448 EXPORT_SYMBOL(vfs_write);
449 
/* Snapshot the file position (not atomic vs. concurrent users of @file). */
static inline loff_t file_pos_read(struct file *file)
{
	return file->f_pos;
}
454 
/* Write back an updated file position after a transfer. */
static inline void file_pos_write(struct file *file, loff_t pos)
{
	file->f_pos = pos;
}
459 
/*
 * read(2): read at the file's current position.  f_pos is sampled
 * before and written back after the transfer; this read-modify-write is
 * not atomic against concurrent users of the same struct file.
 */
SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
	struct fd f = fdget(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_read(f.file, buf, count, &pos);
		file_pos_write(f.file, pos);
		fdput(f);
	}
	return ret;
}
473 
/*
 * write(2): write at the file's current position.  Same non-atomic
 * f_pos sample/write-back pattern as read(2).
 */
SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
		size_t, count)
{
	struct fd f = fdget(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_write(f.file, buf, count, &pos);
		file_pos_write(f.file, pos);
		fdput(f);
	}

	return ret;
}
489 
/*
 * pread(2): positional read; f_pos is neither consulted nor updated.
 * Fails with -ESPIPE on files that do not allow positional reads.
 */
SYSCALL_DEFINE(pread64)(unsigned int fd, char __user *buf,
			size_t count, loff_t pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PREAD)
			ret = vfs_read(f.file, buf, count, &pos);
		fdput(f);
	}

	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/* Argument-widening glue for ABIs that route syscalls through wrappers. */
asmlinkage long SyS_pread64(long fd, long buf, long count, loff_t pos)
{
	return SYSC_pread64((unsigned int) fd, (char __user *) buf,
			    (size_t) count, pos);
}
SYSCALL_ALIAS(sys_pread64, SyS_pread64);
#endif
517 
/*
 * pwrite(2): positional write; f_pos is neither consulted nor updated.
 * Fails with -ESPIPE on files that do not allow positional writes.
 */
SYSCALL_DEFINE(pwrite64)(unsigned int fd, const char __user *buf,
			 size_t count, loff_t pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_write(f.file, buf, count, &pos);
		fdput(f);
	}

	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/* Argument-widening glue for ABIs that route syscalls through wrappers. */
asmlinkage long SyS_pwrite64(long fd, long buf, long count, loff_t pos)
{
	return SYSC_pwrite64((unsigned int) fd, (const char __user *) buf,
			     (size_t) count, pos);
}
SYSCALL_ALIAS(sys_pwrite64, SyS_pwrite64);
#endif
545 
/*
 * Truncate an iovec in-place so it describes at most @to bytes.
 * Returns the number of segments still in use (the last of which may
 * have had its iov_len reduced).  If the iovec already describes fewer
 * than @to bytes it is left unchanged.
 */
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
{
	size_t remaining = to;
	unsigned long used = 0;

	while (used < nr_segs) {
		used++;
		if (iov->iov_len >= remaining) {
			/* This segment absorbs the rest of the budget. */
			iov->iov_len = remaining;
			break;
		}
		remaining -= iov->iov_len;
		iov++;
	}
	return used;
}
565 EXPORT_SYMBOL(iov_shorten);
566 
/*
 * Drive a vectored aio method (@fn, an ->aio_read or ->aio_write)
 * synchronously: retry on -EIOCBRETRY, wait if the iocb was queued, and
 * report the final position through *ppos.
 */
ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
		unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		/* -EIOCBRETRY: sleep until the iocb is kicked, then retry. */
		wait_on_retry_sync_kiocb(&kiocb);
	}

	/* Queued asynchronously: block until it completes. */
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
590 
591 /* Do it by hand, with file-ops */
592 ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
593 		unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
594 {
595 	struct iovec *vector = iov;
596 	ssize_t ret = 0;
597 
598 	while (nr_segs > 0) {
599 		void __user *base;
600 		size_t len;
601 		ssize_t nr;
602 
603 		base = vector->iov_base;
604 		len = vector->iov_len;
605 		vector++;
606 		nr_segs--;
607 
608 		nr = fn(filp, base, len, ppos);
609 
610 		if (nr < 0) {
611 			if (!ret)
612 				ret = nr;
613 			break;
614 		}
615 		ret += nr;
616 		if (nr != len)
617 			break;
618 	}
619 
620 	return ret;
621 }
622 
/* A write operation does a read from user space and vice versa */
#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)

/*
 * Copy an iovec array in from userspace and validate it.
 *
 * On success returns the total byte count described by the vector,
 * capped at MAX_RW_COUNT (shortening the segment that crosses the cap),
 * and stores the kernel copy through @ret_pointer: either @fast_pointer
 * (when nr_segs <= fast_segs) or a kmalloc'd array the caller must
 * kfree.  *ret_pointer is set even on failure so the caller can always
 * free it.  A negative @type skips the access_ok() check -- the caller
 * takes responsibility for the buffers.
 */
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_pointer,
			      struct iovec **ret_pointer)
{
	unsigned long seg;
	ssize_t ret;
	struct iovec *iov = fast_pointer;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument
	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0) {
		ret = 0;
		goto out;
	}

	/*
	 * First get the "struct iovec" from user memory and
	 * verify all the pointers
	 */
	if (nr_segs > UIO_MAXIOV) {
		ret = -EINVAL;
		goto out;
	}
	if (nr_segs > fast_segs) {
		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
		if (iov == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	}
	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL
	 * if an element length is < 0 when cast to ssize_t or if the
	 * total length would overflow the ssize_t return value of the
	 * system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	ret = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		void __user *buf = iov[seg].iov_base;
		ssize_t len = (ssize_t)iov[seg].iov_len;

		/* see if we we're about to use an invalid len or if
		 * it's about to overflow ssize_t */
		if (len < 0) {
			ret = -EINVAL;
			goto out;
		}
		if (type >= 0
		    && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
			ret = -EFAULT;
			goto out;
		}
		/* Clamp the total at MAX_RW_COUNT, shortening this segment. */
		if (len > MAX_RW_COUNT - ret) {
			len = MAX_RW_COUNT - ret;
			iov[seg].iov_len = len;
		}
		ret += len;
	}
out:
	*ret_pointer = iov;
	return ret;
}
700 
/*
 * Common guts of readv(2)/writev(2): import and validate the user
 * iovec, verify the file range, then use the file's vectored (aio)
 * method if it has one, falling back to looping over ->read/->write.
 */
static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;	/* may be reallocated on the heap */
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;

	if (!file->f_op) {
		ret = -EINVAL;
		goto out;
	}

	ret = rw_copy_check_uvector(type, uvector, nr_segs,
				    ARRAY_SIZE(iovstack), iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
	} else {
		/* ->write takes a const buffer; cast to the common type. */
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
	}

	if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
						pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

out:
	if (iov != iovstack)
		kfree(iov);
	/* Notify on any non-error read (even 0 bytes); only on writes > 0. */
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}
753 
754 ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
755 		  unsigned long vlen, loff_t *pos)
756 {
757 	if (!(file->f_mode & FMODE_READ))
758 		return -EBADF;
759 	if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
760 		return -EINVAL;
761 
762 	return do_readv_writev(READ, file, vec, vlen, pos);
763 }
764 
765 EXPORT_SYMBOL(vfs_readv);
766 
767 ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
768 		   unsigned long vlen, loff_t *pos)
769 {
770 	if (!(file->f_mode & FMODE_WRITE))
771 		return -EBADF;
772 	if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
773 		return -EINVAL;
774 
775 	return do_readv_writev(WRITE, file, vec, vlen, pos);
776 }
777 
778 EXPORT_SYMBOL(vfs_writev);
779 
/*
 * readv(2): vectored read at the file's current position, with the same
 * non-atomic f_pos sample/write-back pattern as read(2).
 */
SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct fd f = fdget(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_readv(f.file, vec, vlen, &pos);
		file_pos_write(f.file, pos);
		fdput(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}
798 
/*
 * writev(2): vectored write at the file's current position, with the
 * same non-atomic f_pos sample/write-back pattern as write(2).
 */
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct fd f = fdget(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_writev(f.file, vec, vlen, &pos);
		file_pos_write(f.file, pos);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
817 
/*
 * Combine the two unsigned-long halves of a preadv/pwritev position
 * into a loff_t.
 */
static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
{
#define HALF_LONG_BITS (BITS_PER_LONG / 2)
	/*
	 * Two half-width shifts instead of one shift by BITS_PER_LONG:
	 * on 64-bit, "high << 64" would be undefined behavior, while
	 * (high << 32) << 32 correctly yields 0.
	 */
	return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
}
823 
/*
 * preadv(2): positional vectored read; the 64-bit position arrives
 * split into two longs.  f_pos is neither consulted nor updated.
 */
SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PREAD)
			ret = vfs_readv(f.file, vec, vlen, &pos);
		fdput(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}
847 
/*
 * pwritev(2): positional vectored write; the 64-bit position arrives
 * split into two longs.  f_pos is neither consulted nor updated.
 */
SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_writev(f.file, vec, vlen, &pos);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
871 
/*
 * Common implementation of sendfile(2)/sendfile64(2): copy up to @count
 * bytes from @in_fd to @out_fd via do_splice_direct().  When @ppos is
 * NULL the input file's f_pos is used (and advanced); otherwise *ppos
 * is.  @max bounds the highest input offset touched (0 means the
 * smaller of the two filesystems' s_maxbytes).  Returns bytes copied or
 * a negative errno.
 */
ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
		    loff_t max)
{
	struct fd in, out;
	struct inode *in_inode, *out_inode;
	loff_t pos;
	ssize_t retval;
	int fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in = fdget(in_fd);
	if (!in.file)
		goto out;
	if (!(in.file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	/* No explicit offset: use (and later advance) the file position. */
	if (!ppos)
		ppos = &in.file->f_pos;
	else
		if (!(in.file->f_mode & FMODE_PREAD))
			goto fput_in;
	retval = rw_verify_area(READ, in.file, ppos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;	/* possibly clamped to MAX_RW_COUNT */

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out = fdget(out_fd);
	if (!out.file)
		goto fput_in;
	if (!(out.file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	in_inode = file_inode(in.file);
	out_inode = file_inode(out.file);
	retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;	/* possibly clamped again */

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	/* Trim the transfer so it never reads past @max. */
	pos = *ppos;
	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (in.file->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	retval = do_splice_direct(in.file, ppos, out.file, count, fl);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
		fsnotify_access(in.file);
		fsnotify_modify(out.file);
	}

	inc_syscr(current);
	inc_syscw(current);
	/* If the splice advanced the position past @max, flag overflow. */
	if (*ppos > max)
		retval = -EOVERFLOW;

fput_out:
	fdput(out);
fput_in:
	fdput(in);
out:
	return retval;
}
961 
/*
 * sendfile(2) with a 32-bit off_t offset pointer.  The transfer is
 * capped at MAX_NON_LFS so the updated offset written back to userspace
 * still fits in off_t.
 */
SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		/* Report the updated offset back to the caller. */
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	/* NULL offset: operate on (and advance) in_fd's file position. */
	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
980 
/*
 * sendfile64(2): like sendfile(2) but with a full loff_t offset, so no
 * LFS cap is needed (max of 0 means "use the filesystems' s_maxbytes").
 */
SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		/* Report the updated offset back to the caller. */
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	/* NULL offset: operate on (and advance) in_fd's file position. */
	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
997