xref: /openbmc/linux/lib/iov_iter.c (revision d623f60d)
1 #include <linux/export.h>
2 #include <linux/bvec.h>
3 #include <linux/uio.h>
4 #include <linux/pagemap.h>
5 #include <linux/slab.h>
6 #include <linux/vmalloc.h>
7 #include <linux/splice.h>
8 #include <net/checksum.h>
9 
10 #define PIPE_PARANOIA /* for now */
11 
12 #define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
13 	size_t left;					\
14 	size_t wanted = n;				\
15 	__p = i->iov;					\
16 	__v.iov_len = min(n, __p->iov_len - skip);	\
17 	if (likely(__v.iov_len)) {			\
18 		__v.iov_base = __p->iov_base + skip;	\
19 		left = (STEP);				\
20 		__v.iov_len -= left;			\
21 		skip += __v.iov_len;			\
22 		n -= __v.iov_len;			\
23 	} else {					\
24 		left = 0;				\
25 	}						\
26 	while (unlikely(!left && n)) {			\
27 		__p++;					\
28 		__v.iov_len = min(n, __p->iov_len);	\
29 		if (unlikely(!__v.iov_len))		\
30 			continue;			\
31 		__v.iov_base = __p->iov_base;		\
32 		left = (STEP);				\
33 		__v.iov_len -= left;			\
34 		skip = __v.iov_len;			\
35 		n -= __v.iov_len;			\
36 	}						\
37 	n = wanted - n;					\
38 }
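
/*
 * The STEP expression runs once per chunk with __v describing the current
 * piece; it must evaluate to the number of bytes it failed to process
 * (0 on full success), and a nonzero result ends the walk early.  On exit
 * n holds the number of bytes actually processed.  A minimal sketch of a
 * caller, assuming an iovec-backed iterator (hypothetical, compiled out):
 */
#if 0
static size_t count_user_bytes(struct iov_iter *i, size_t n)
{
	const struct iovec *iov;
	struct iovec v;
	size_t skip = i->iov_offset;
	size_t total = 0;

	iterate_iovec(i, n, v, iov, skip, ({
		total += v.iov_len;	/* consume the whole chunk */
		0;			/* nothing left unprocessed */
	}))
	return total;
}
#endif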
39 
40 #define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
41 	size_t wanted = n;				\
42 	__p = i->kvec;					\
43 	__v.iov_len = min(n, __p->iov_len - skip);	\
44 	if (likely(__v.iov_len)) {			\
45 		__v.iov_base = __p->iov_base + skip;	\
46 		(void)(STEP);				\
47 		skip += __v.iov_len;			\
48 		n -= __v.iov_len;			\
49 	}						\
50 	while (unlikely(n)) {				\
51 		__p++;					\
52 		__v.iov_len = min(n, __p->iov_len);	\
53 		if (unlikely(!__v.iov_len))		\
54 			continue;			\
55 		__v.iov_base = __p->iov_base;		\
56 		(void)(STEP);				\
57 		skip = __v.iov_len;			\
58 		n -= __v.iov_len;			\
59 	}						\
60 	n = wanted;					\
61 }
62 
63 #define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
64 	struct bvec_iter __start;			\
65 	__start.bi_size = n;				\
66 	__start.bi_bvec_done = skip;			\
67 	__start.bi_idx = 0;				\
68 	for_each_bvec(__v, i->bvec, __bi, __start) {	\
69 		if (!__v.bv_len)			\
70 			continue;			\
71 		(void)(STEP);				\
72 	}						\
73 }
74 
75 #define iterate_all_kinds(i, n, v, I, B, K) {			\
76 	if (likely(n)) {					\
77 		size_t skip = i->iov_offset;			\
78 		if (unlikely(i->type & ITER_BVEC)) {		\
79 			struct bio_vec v;			\
80 			struct bvec_iter __bi;			\
81 			iterate_bvec(i, n, v, __bi, skip, (B))	\
82 		} else if (unlikely(i->type & ITER_KVEC)) {	\
83 			const struct kvec *kvec;		\
84 			struct kvec v;				\
85 			iterate_kvec(i, n, v, kvec, skip, (K))	\
86 		} else {					\
87 			const struct iovec *iov;		\
88 			struct iovec v;				\
89 			iterate_iovec(i, n, v, iov, skip, (I))	\
90 		}						\
91 	}							\
92 }
93 
94 #define iterate_and_advance(i, n, v, I, B, K) {			\
95 	if (unlikely(i->count < n))				\
96 		n = i->count;					\
97 	if (i->count) {						\
98 		size_t skip = i->iov_offset;			\
99 		if (unlikely(i->type & ITER_BVEC)) {		\
100 			const struct bio_vec *bvec = i->bvec;	\
101 			struct bio_vec v;			\
102 			struct bvec_iter __bi;			\
103 			iterate_bvec(i, n, v, __bi, skip, (B))	\
104 			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
105 			i->nr_segs -= i->bvec - bvec;		\
106 			skip = __bi.bi_bvec_done;		\
107 		} else if (unlikely(i->type & ITER_KVEC)) {	\
108 			const struct kvec *kvec;		\
109 			struct kvec v;				\
110 			iterate_kvec(i, n, v, kvec, skip, (K))	\
111 			if (skip == kvec->iov_len) {		\
112 				kvec++;				\
113 				skip = 0;			\
114 			}					\
115 			i->nr_segs -= kvec - i->kvec;		\
116 			i->kvec = kvec;				\
117 		} else {					\
118 			const struct iovec *iov;		\
119 			struct iovec v;				\
120 			iterate_iovec(i, n, v, iov, skip, (I))	\
121 			if (skip == iov->iov_len) {		\
122 				iov++;				\
123 				skip = 0;			\
124 			}					\
125 			i->nr_segs -= iov - i->iov;		\
126 			i->iov = iov;				\
127 		}						\
128 		i->count -= n;					\
129 		i->iov_offset = skip;				\
130 	}							\
131 }
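
/*
 * Same walk as iterate_all_kinds(), but the iterator is advanced as a side
 * effect: i->count, i->iov_offset, the segment pointer and i->nr_segs all
 * end up pointing past the n processed bytes.  With 0 for every STEP it
 * degenerates into a pure cursor move, which is exactly how
 * iov_iter_advance() uses it further down.
 */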
132 
133 static int copyout(void __user *to, const void *from, size_t n)
134 {
135 	if (access_ok(VERIFY_WRITE, to, n)) {
136 		kasan_check_read(from, n);
137 		n = raw_copy_to_user(to, from, n);
138 	}
139 	return n;
140 }
141 
142 static int copyin(void *to, const void __user *from, size_t n)
143 {
144 	if (access_ok(VERIFY_READ, from, n)) {
145 		kasan_check_write(to, n);
146 		n = raw_copy_from_user(to, from, n);
147 	}
148 	return n;
149 }
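
/*
 * Both helpers follow the copy_{to,from}_user() convention: they return
 * the number of bytes that could NOT be transferred, so 0 means complete
 * success.  Sketch of the usual caller-side check (hypothetical, compiled
 * out):
 */
#if 0
static int copy_all_or_fault(void __user *ubuf, const void *kbuf, size_t n)
{
	size_t left = copyout(ubuf, kbuf, n);

	if (left)	/* the tail of the buffer was not written */
		return -EFAULT;
	return 0;
}
#endif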
150 
151 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
152 			 struct iov_iter *i)
153 {
154 	size_t skip, copy, left, wanted;
155 	const struct iovec *iov;
156 	char __user *buf;
157 	void *kaddr, *from;
158 
159 	if (unlikely(bytes > i->count))
160 		bytes = i->count;
161 
162 	if (unlikely(!bytes))
163 		return 0;
164 
165 	might_fault();
166 	wanted = bytes;
167 	iov = i->iov;
168 	skip = i->iov_offset;
169 	buf = iov->iov_base + skip;
170 	copy = min(bytes, iov->iov_len - skip);
171 
172 	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
173 		kaddr = kmap_atomic(page);
174 		from = kaddr + offset;
175 
176 		/* first chunk, usually the only one */
177 		left = copyout(buf, from, copy);
178 		copy -= left;
179 		skip += copy;
180 		from += copy;
181 		bytes -= copy;
182 
183 		while (unlikely(!left && bytes)) {
184 			iov++;
185 			buf = iov->iov_base;
186 			copy = min(bytes, iov->iov_len);
187 			left = copyout(buf, from, copy);
188 			copy -= left;
189 			skip = copy;
190 			from += copy;
191 			bytes -= copy;
192 		}
193 		if (likely(!bytes)) {
194 			kunmap_atomic(kaddr);
195 			goto done;
196 		}
197 		offset = from - kaddr;
198 		buf += copy;
199 		kunmap_atomic(kaddr);
200 		copy = min(bytes, iov->iov_len - skip);
201 	}
202 	/* Too bad - revert to non-atomic kmap */
203 
204 	kaddr = kmap(page);
205 	from = kaddr + offset;
206 	left = copyout(buf, from, copy);
207 	copy -= left;
208 	skip += copy;
209 	from += copy;
210 	bytes -= copy;
211 	while (unlikely(!left && bytes)) {
212 		iov++;
213 		buf = iov->iov_base;
214 		copy = min(bytes, iov->iov_len);
215 		left = copyout(buf, from, copy);
216 		copy -= left;
217 		skip = copy;
218 		from += copy;
219 		bytes -= copy;
220 	}
221 	kunmap(page);
222 
223 done:
224 	if (skip == iov->iov_len) {
225 		iov++;
226 		skip = 0;
227 	}
228 	i->count -= wanted - bytes;
229 	i->nr_segs -= iov - i->iov;
230 	i->iov = iov;
231 	i->iov_offset = skip;
232 	return wanted - bytes;
233 }
234 
235 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
236 			 struct iov_iter *i)
237 {
238 	size_t skip, copy, left, wanted;
239 	const struct iovec *iov;
240 	char __user *buf;
241 	void *kaddr, *to;
242 
243 	if (unlikely(bytes > i->count))
244 		bytes = i->count;
245 
246 	if (unlikely(!bytes))
247 		return 0;
248 
249 	might_fault();
250 	wanted = bytes;
251 	iov = i->iov;
252 	skip = i->iov_offset;
253 	buf = iov->iov_base + skip;
254 	copy = min(bytes, iov->iov_len - skip);
255 
256 	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
257 		kaddr = kmap_atomic(page);
258 		to = kaddr + offset;
259 
260 		/* first chunk, usually the only one */
261 		left = copyin(to, buf, copy);
262 		copy -= left;
263 		skip += copy;
264 		to += copy;
265 		bytes -= copy;
266 
267 		while (unlikely(!left && bytes)) {
268 			iov++;
269 			buf = iov->iov_base;
270 			copy = min(bytes, iov->iov_len);
271 			left = copyin(to, buf, copy);
272 			copy -= left;
273 			skip = copy;
274 			to += copy;
275 			bytes -= copy;
276 		}
277 		if (likely(!bytes)) {
278 			kunmap_atomic(kaddr);
279 			goto done;
280 		}
281 		offset = to - kaddr;
282 		buf += copy;
283 		kunmap_atomic(kaddr);
284 		copy = min(bytes, iov->iov_len - skip);
285 	}
286 	/* Too bad - revert to non-atomic kmap */
287 
288 	kaddr = kmap(page);
289 	to = kaddr + offset;
290 	left = copyin(to, buf, copy);
291 	copy -= left;
292 	skip += copy;
293 	to += copy;
294 	bytes -= copy;
295 	while (unlikely(!left && bytes)) {
296 		iov++;
297 		buf = iov->iov_base;
298 		copy = min(bytes, iov->iov_len);
299 		left = copyin(to, buf, copy);
300 		copy -= left;
301 		skip = copy;
302 		to += copy;
303 		bytes -= copy;
304 	}
305 	kunmap(page);
306 
307 done:
308 	if (skip == iov->iov_len) {
309 		iov++;
310 		skip = 0;
311 	}
312 	i->count -= wanted - bytes;
313 	i->nr_segs -= iov - i->iov;
314 	i->iov = iov;
315 	i->iov_offset = skip;
316 	return wanted - bytes;
317 }
318 
319 #ifdef PIPE_PARANOIA
320 static bool sanity(const struct iov_iter *i)
321 {
322 	struct pipe_inode_info *pipe = i->pipe;
323 	int idx = i->idx;
324 	int next = pipe->curbuf + pipe->nrbufs;
325 	if (i->iov_offset) {
326 		struct pipe_buffer *p;
327 		if (unlikely(!pipe->nrbufs))
328 			goto Bad;	// pipe must be non-empty
329 		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
330 			goto Bad;	// must be at the last buffer...
331 
332 		p = &pipe->bufs[idx];
333 		if (unlikely(p->offset + p->len != i->iov_offset))
334 			goto Bad;	// ... at the end of segment
335 	} else {
336 		if (idx != (next & (pipe->buffers - 1)))
337 			goto Bad;	// must be right after the last buffer
338 	}
339 	return true;
340 Bad:
341 	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
342 	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
343 			pipe->curbuf, pipe->nrbufs, pipe->buffers);
344 	for (idx = 0; idx < pipe->buffers; idx++)
345 		printk(KERN_ERR "[%p %p %d %d]\n",
346 			pipe->bufs[idx].ops,
347 			pipe->bufs[idx].page,
348 			pipe->bufs[idx].offset,
349 			pipe->bufs[idx].len);
350 	WARN_ON(1);
351 	return false;
352 }
353 #else
354 #define sanity(i) true
355 #endif
356 
357 static inline int next_idx(int idx, struct pipe_inode_info *pipe)
358 {
359 	return (idx + 1) & (pipe->buffers - 1);
360 }
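
/*
 * pipe->buffers is always a power of two, so the mask is a cheap modulo:
 * with 16 buffers, next_idx(15, pipe) wraps around to 0.
 */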
361 
362 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
363 			 struct iov_iter *i)
364 {
365 	struct pipe_inode_info *pipe = i->pipe;
366 	struct pipe_buffer *buf;
367 	size_t off;
368 	int idx;
369 
370 	if (unlikely(bytes > i->count))
371 		bytes = i->count;
372 
373 	if (unlikely(!bytes))
374 		return 0;
375 
376 	if (!sanity(i))
377 		return 0;
378 
379 	off = i->iov_offset;
380 	idx = i->idx;
381 	buf = &pipe->bufs[idx];
382 	if (off) {
383 		if (offset == off && buf->page == page) {
384 			/* merge with the last one */
385 			buf->len += bytes;
386 			i->iov_offset += bytes;
387 			goto out;
388 		}
389 		idx = next_idx(idx, pipe);
390 		buf = &pipe->bufs[idx];
391 	}
392 	if (idx == pipe->curbuf && pipe->nrbufs)
393 		return 0;
394 	pipe->nrbufs++;
395 	buf->ops = &page_cache_pipe_buf_ops;
396 	get_page(buf->page = page);
397 	buf->offset = offset;
398 	buf->len = bytes;
399 	i->iov_offset = offset + bytes;
400 	i->idx = idx;
401 out:
402 	i->count -= bytes;
403 	return bytes;
404 }
405 
406 /*
407  * Fault in one or more iovecs of the given iov_iter, to a maximum length of
408  * bytes.  For each iovec, fault in each page that constitutes the iovec.
409  *
410  * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
411  * because it is an invalid address).
412  */
413 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
414 {
415 	size_t skip = i->iov_offset;
416 	const struct iovec *iov;
417 	int err;
418 	struct iovec v;
419 
420 	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
421 		iterate_iovec(i, bytes, v, iov, skip, ({
422 			err = fault_in_pages_readable(v.iov_base, v.iov_len);
423 			if (unlikely(err))
424 				return err;
425 		0;}))
426 	}
427 	return 0;
428 }
429 EXPORT_SYMBOL(iov_iter_fault_in_readable);
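
/*
 * Typical use is to pre-fault the user pages before entering a section
 * where taking a page fault would be unsafe, then copy with pagefaults
 * disabled.  A sketch of that pattern, loosely following the shape of the
 * buffered write path (hypothetical, compiled out):
 */
#if 0
static ssize_t write_chunk(struct page *page, unsigned long offset,
			   size_t bytes, struct iov_iter *i)
{
	size_t copied;

	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
		return -EFAULT;

	/* kmap_atomic() inside disables pagefaults for the copy */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	iov_iter_advance(i, copied);
	return copied;
}
#endif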
430 
431 void iov_iter_init(struct iov_iter *i, int direction,
432 			const struct iovec *iov, unsigned long nr_segs,
433 			size_t count)
434 {
435 	/* It will get better.  Eventually... */
436 	if (uaccess_kernel()) {
437 		direction |= ITER_KVEC;
438 		i->type = direction;
439 		i->kvec = (struct kvec *)iov;
440 	} else {
441 		i->type = direction;
442 		i->iov = iov;
443 	}
444 	i->nr_segs = nr_segs;
445 	i->iov_offset = 0;
446 	i->count = count;
447 }
448 EXPORT_SYMBOL(iov_iter_init);
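
/*
 * Sketch of a caller (hypothetical, compiled out): wrap a single user
 * buffer for a read(2)-style operation.  READ means the iterator is the
 * destination of the transfer (data flows to user memory); WRITE is the
 * reverse.
 */
#if 0
static void init_read_iter(struct iov_iter *iter, struct iovec *iov,
			   void __user *ubuf, size_t len)
{
	iov->iov_base = ubuf;
	iov->iov_len = len;
	iov_iter_init(iter, READ, iov, 1, len);
}
#endif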
449 
450 static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
451 {
452 	char *from = kmap_atomic(page);
453 	memcpy(to, from + offset, len);
454 	kunmap_atomic(from);
455 }
456 
457 static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
458 {
459 	char *to = kmap_atomic(page);
460 	memcpy(to + offset, from, len);
461 	kunmap_atomic(to);
462 }
463 
464 static void memzero_page(struct page *page, size_t offset, size_t len)
465 {
466 	char *addr = kmap_atomic(page);
467 	memset(addr + offset, 0, len);
468 	kunmap_atomic(addr);
469 }
470 
471 static inline bool allocated(struct pipe_buffer *buf)
472 {
473 	return buf->ops == &default_pipe_buf_ops;
474 }
475 
476 static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
477 {
478 	size_t off = i->iov_offset;
479 	int idx = i->idx;
480 	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
481 		idx = next_idx(idx, i->pipe);
482 		off = 0;
483 	}
484 	*idxp = idx;
485 	*offp = off;
486 }
487 
488 static size_t push_pipe(struct iov_iter *i, size_t size,
489 			int *idxp, size_t *offp)
490 {
491 	struct pipe_inode_info *pipe = i->pipe;
492 	size_t off;
493 	int idx;
494 	ssize_t left;
495 
496 	if (unlikely(size > i->count))
497 		size = i->count;
498 	if (unlikely(!size))
499 		return 0;
500 
501 	left = size;
502 	data_start(i, &idx, &off);
503 	*idxp = idx;
504 	*offp = off;
505 	if (off) {
506 		left -= PAGE_SIZE - off;
507 		if (left <= 0) {
508 			pipe->bufs[idx].len += size;
509 			return size;
510 		}
511 		pipe->bufs[idx].len = PAGE_SIZE;
512 		idx = next_idx(idx, pipe);
513 	}
514 	while (idx != pipe->curbuf || !pipe->nrbufs) {
515 		struct page *page = alloc_page(GFP_USER);
516 		if (!page)
517 			break;
518 		pipe->nrbufs++;
519 		pipe->bufs[idx].ops = &default_pipe_buf_ops;
520 		pipe->bufs[idx].page = page;
521 		pipe->bufs[idx].offset = 0;
522 		if (left <= PAGE_SIZE) {
523 			pipe->bufs[idx].len = left;
524 			return size;
525 		}
526 		pipe->bufs[idx].len = PAGE_SIZE;
527 		left -= PAGE_SIZE;
528 		idx = next_idx(idx, pipe);
529 	}
530 	return size - left;
531 }
532 
533 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
534 				struct iov_iter *i)
535 {
536 	struct pipe_inode_info *pipe = i->pipe;
537 	size_t n, off;
538 	int idx;
539 
540 	if (!sanity(i))
541 		return 0;
542 
543 	bytes = n = push_pipe(i, bytes, &idx, &off);
544 	if (unlikely(!n))
545 		return 0;
546 	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
547 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
548 		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
549 		i->idx = idx;
550 		i->iov_offset = off + chunk;
551 		n -= chunk;
552 		addr += chunk;
553 	}
554 	i->count -= bytes;
555 	return bytes;
556 }
557 
558 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
559 {
560 	const char *from = addr;
561 	if (unlikely(i->type & ITER_PIPE))
562 		return copy_pipe_to_iter(addr, bytes, i);
563 	if (iter_is_iovec(i))
564 		might_fault();
565 	iterate_and_advance(i, bytes, v,
566 		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
567 		memcpy_to_page(v.bv_page, v.bv_offset,
568 			       (from += v.bv_len) - v.bv_len, v.bv_len),
569 		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
570 	)
571 
572 	return bytes;
573 }
574 EXPORT_SYMBOL(_copy_to_iter);
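
/*
 * Callers normally go through the copy_to_iter() wrapper in <linux/uio.h>,
 * which adds the hardened-usercopy check.  The return value is the number
 * of bytes copied; a short return means user memory faulted part way
 * through.  Sketch (hypothetical, compiled out):
 */
#if 0
static int send_reply(const void *kbuf, size_t len, struct iov_iter *to)
{
	size_t copied = copy_to_iter(kbuf, len, to);

	if (copied != len)	/* partial copy: user buffer faulted */
		return -EFAULT;
	return 0;
}
#endif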
575 
576 #ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
577 static int copyout_mcsafe(void __user *to, const void *from, size_t n)
578 {
579 	if (access_ok(VERIFY_WRITE, to, n)) {
580 		kasan_check_read(from, n);
581 		n = copy_to_user_mcsafe((__force void *) to, from, n);
582 	}
583 	return n;
584 }
585 
586 static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
587 		const char *from, size_t len)
588 {
589 	unsigned long ret;
590 	char *to;
591 
592 	to = kmap_atomic(page);
593 	ret = memcpy_mcsafe(to + offset, from, len);
594 	kunmap_atomic(to);
595 
596 	return ret;
597 }
598 
599 size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
600 {
601 	const char *from = addr;
602 	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
603 
604 	if (unlikely(i->type & ITER_PIPE)) {
605 		WARN_ON(1);
606 		return 0;
607 	}
608 	if (iter_is_iovec(i))
609 		might_fault();
610 	iterate_and_advance(i, bytes, v,
611 		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
612 		({
613 		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
614 				(from += v.bv_len) - v.bv_len, v.bv_len);
615 		if (rem) {
616 			curr_addr = (unsigned long) from;
617 			bytes = curr_addr - s_addr - rem;
618 			return bytes;
619 		}
620 		}),
621 		({
622 		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
623 				v.iov_len);
624 		if (rem) {
625 			curr_addr = (unsigned long) from;
626 			bytes = curr_addr - s_addr - rem;
627 			return bytes;
628 		}
629 		})
630 	)
631 
632 	return bytes;
633 }
634 EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
635 #endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
636 
637 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
638 {
639 	char *to = addr;
640 	if (unlikely(i->type & ITER_PIPE)) {
641 		WARN_ON(1);
642 		return 0;
643 	}
644 	if (iter_is_iovec(i))
645 		might_fault();
646 	iterate_and_advance(i, bytes, v,
647 		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
648 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
649 				 v.bv_offset, v.bv_len),
650 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
651 	)
652 
653 	return bytes;
654 }
655 EXPORT_SYMBOL(_copy_from_iter);
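
/*
 * Mirror image of _copy_to_iter(): data moves from the iterator into a
 * kernel buffer, and a short return again means a fault part way through.
 * Sketch, using the copy_from_iter() wrapper (hypothetical, compiled out):
 */
#if 0
static int recv_request(void *kbuf, size_t len, struct iov_iter *from)
{
	size_t copied = copy_from_iter(kbuf, len, from);

	if (copied != len)
		return -EFAULT;
	return 0;
}
#endif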
656 
657 bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
658 {
659 	char *to = addr;
660 	if (unlikely(i->type & ITER_PIPE)) {
661 		WARN_ON(1);
662 		return false;
663 	}
664 	if (unlikely(i->count < bytes))
665 		return false;
666 
667 	if (iter_is_iovec(i))
668 		might_fault();
669 	iterate_all_kinds(i, bytes, v, ({
670 		if (copyin((to += v.iov_len) - v.iov_len,
671 				      v.iov_base, v.iov_len))
672 			return false;
673 		0;}),
674 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
675 				 v.bv_offset, v.bv_len),
676 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
677 	)
678 
679 	iov_iter_advance(i, bytes);
680 	return true;
681 }
682 EXPORT_SYMBOL(_copy_from_iter_full);
683 
684 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
685 {
686 	char *to = addr;
687 	if (unlikely(i->type & ITER_PIPE)) {
688 		WARN_ON(1);
689 		return 0;
690 	}
691 	iterate_and_advance(i, bytes, v,
692 		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
693 					 v.iov_base, v.iov_len),
694 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
695 				 v.bv_offset, v.bv_len),
696 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
697 	)
698 
699 	return bytes;
700 }
701 EXPORT_SYMBOL(_copy_from_iter_nocache);
702 
703 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
704 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
705 {
706 	char *to = addr;
707 	if (unlikely(i->type & ITER_PIPE)) {
708 		WARN_ON(1);
709 		return 0;
710 	}
711 	iterate_and_advance(i, bytes, v,
712 		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
713 					 v.iov_base, v.iov_len),
714 		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
715 				 v.bv_offset, v.bv_len),
716 		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
717 			v.iov_len)
718 	)
719 
720 	return bytes;
721 }
722 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
723 #endif
724 
725 bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
726 {
727 	char *to = addr;
728 	if (unlikely(i->type & ITER_PIPE)) {
729 		WARN_ON(1);
730 		return false;
731 	}
732 	if (unlikely(i->count < bytes))
733 		return false;
734 	iterate_all_kinds(i, bytes, v, ({
735 		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
736 					     v.iov_base, v.iov_len))
737 			return false;
738 		0;}),
739 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
740 				 v.bv_offset, v.bv_len),
741 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
742 	)
743 
744 	iov_iter_advance(i, bytes);
745 	return true;
746 }
747 EXPORT_SYMBOL(_copy_from_iter_full_nocache);
748 
749 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
750 {
751 	struct page *head = compound_head(page);
752 	size_t v = n + offset + page_address(page) - page_address(head);
753 
754 	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
755 		return true;
756 	WARN_ON(1);
757 	return false;
758 }
759 
760 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
761 			 struct iov_iter *i)
762 {
763 	if (unlikely(!page_copy_sane(page, offset, bytes)))
764 		return 0;
765 	if (i->type & (ITER_BVEC|ITER_KVEC)) {
766 		void *kaddr = kmap_atomic(page);
767 		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
768 		kunmap_atomic(kaddr);
769 		return wanted;
770 	} else if (likely(!(i->type & ITER_PIPE)))
771 		return copy_page_to_iter_iovec(page, offset, bytes, i);
772 	else
773 		return copy_page_to_iter_pipe(page, offset, bytes, i);
774 }
775 EXPORT_SYMBOL(copy_page_to_iter);
776 
777 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
778 			 struct iov_iter *i)
779 {
780 	if (unlikely(!page_copy_sane(page, offset, bytes)))
781 		return 0;
782 	if (unlikely(i->type & ITER_PIPE)) {
783 		WARN_ON(1);
784 		return 0;
785 	}
786 	if (i->type & (ITER_BVEC|ITER_KVEC)) {
787 		void *kaddr = kmap_atomic(page);
788 		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
789 		kunmap_atomic(kaddr);
790 		return wanted;
791 	} else
792 		return copy_page_from_iter_iovec(page, offset, bytes, i);
793 }
794 EXPORT_SYMBOL(copy_page_from_iter);
795 
796 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
797 {
798 	struct pipe_inode_info *pipe = i->pipe;
799 	size_t n, off;
800 	int idx;
801 
802 	if (!sanity(i))
803 		return 0;
804 
805 	bytes = n = push_pipe(i, bytes, &idx, &off);
806 	if (unlikely(!n))
807 		return 0;
808 
809 	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
810 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
811 		memzero_page(pipe->bufs[idx].page, off, chunk);
812 		i->idx = idx;
813 		i->iov_offset = off + chunk;
814 		n -= chunk;
815 	}
816 	i->count -= bytes;
817 	return bytes;
818 }
819 
820 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
821 {
822 	if (unlikely(i->type & ITER_PIPE))
823 		return pipe_zero(bytes, i);
824 	iterate_and_advance(i, bytes, v,
825 		clear_user(v.iov_base, v.iov_len),
826 		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
827 		memset(v.iov_base, 0, v.iov_len)
828 	)
829 
830 	return bytes;
831 }
832 EXPORT_SYMBOL(iov_iter_zero);
833 
834 size_t iov_iter_copy_from_user_atomic(struct page *page,
835 		struct iov_iter *i, unsigned long offset, size_t bytes)
836 {
837 	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
838 	if (unlikely(!page_copy_sane(page, offset, bytes))) {
839 		kunmap_atomic(kaddr);
840 		return 0;
841 	}
842 	if (unlikely(i->type & ITER_PIPE)) {
843 		kunmap_atomic(kaddr);
844 		WARN_ON(1);
845 		return 0;
846 	}
847 	iterate_all_kinds(i, bytes, v,
848 		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
849 		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
850 				 v.bv_offset, v.bv_len),
851 		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
852 	)
853 	kunmap_atomic(kaddr);
854 	return bytes;
855 }
856 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
857 
858 static inline void pipe_truncate(struct iov_iter *i)
859 {
860 	struct pipe_inode_info *pipe = i->pipe;
861 	if (pipe->nrbufs) {
862 		size_t off = i->iov_offset;
863 		int idx = i->idx;
864 		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
865 		if (off) {
866 			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
867 			idx = next_idx(idx, pipe);
868 			nrbufs++;
869 		}
870 		while (pipe->nrbufs > nrbufs) {
871 			pipe_buf_release(pipe, &pipe->bufs[idx]);
872 			idx = next_idx(idx, pipe);
873 			pipe->nrbufs--;
874 		}
875 	}
876 }
877 
878 static void pipe_advance(struct iov_iter *i, size_t size)
879 {
880 	struct pipe_inode_info *pipe = i->pipe;
881 	if (unlikely(i->count < size))
882 		size = i->count;
883 	if (size) {
884 		struct pipe_buffer *buf;
885 		size_t off = i->iov_offset, left = size;
886 		int idx = i->idx;
887 		if (off) /* make it relative to the beginning of buffer */
888 			left += off - pipe->bufs[idx].offset;
889 		while (1) {
890 			buf = &pipe->bufs[idx];
891 			if (left <= buf->len)
892 				break;
893 			left -= buf->len;
894 			idx = next_idx(idx, pipe);
895 		}
896 		i->idx = idx;
897 		i->iov_offset = buf->offset + left;
898 	}
899 	i->count -= size;
900 	/* ... and discard everything past that point */
901 	pipe_truncate(i);
902 }
903 
904 void iov_iter_advance(struct iov_iter *i, size_t size)
905 {
906 	if (unlikely(i->type & ITER_PIPE)) {
907 		pipe_advance(i, size);
908 		return;
909 	}
910 	iterate_and_advance(i, size, v, 0, 0, 0)
911 }
912 EXPORT_SYMBOL(iov_iter_advance);
913 
914 void iov_iter_revert(struct iov_iter *i, size_t unroll)
915 {
916 	if (!unroll)
917 		return;
918 	if (WARN_ON(unroll > MAX_RW_COUNT))
919 		return;
920 	i->count += unroll;
921 	if (unlikely(i->type & ITER_PIPE)) {
922 		struct pipe_inode_info *pipe = i->pipe;
923 		int idx = i->idx;
924 		size_t off = i->iov_offset;
925 		while (1) {
926 			size_t n = off - pipe->bufs[idx].offset;
927 			if (unroll < n) {
928 				off -= unroll;
929 				break;
930 			}
931 			unroll -= n;
932 			if (!unroll && idx == i->start_idx) {
933 				off = 0;
934 				break;
935 			}
936 			if (!idx--)
937 				idx = pipe->buffers - 1;
938 			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
939 		}
940 		i->iov_offset = off;
941 		i->idx = idx;
942 		pipe_truncate(i);
943 		return;
944 	}
945 	if (unroll <= i->iov_offset) {
946 		i->iov_offset -= unroll;
947 		return;
948 	}
949 	unroll -= i->iov_offset;
950 	if (i->type & ITER_BVEC) {
951 		const struct bio_vec *bvec = i->bvec;
952 		while (1) {
953 			size_t n = (--bvec)->bv_len;
954 			i->nr_segs++;
955 			if (unroll <= n) {
956 				i->bvec = bvec;
957 				i->iov_offset = n - unroll;
958 				return;
959 			}
960 			unroll -= n;
961 		}
962 	} else { /* same logics for iovec and kvec */
963 		const struct iovec *iov = i->iov;
964 		while (1) {
965 			size_t n = (--iov)->iov_len;
966 			i->nr_segs++;
967 			if (unroll <= n) {
968 				i->iov = iov;
969 				i->iov_offset = n - unroll;
970 				return;
971 			}
972 			unroll -= n;
973 		}
974 	}
975 }
976 EXPORT_SYMBOL(iov_iter_revert);
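
/*
 * advance and revert pair up: a caller that consumed part of the iterator
 * and then hit an error can roll the cursor back and report cleanly.
 * Sketch (hypothetical, compiled out):
 */
#if 0
static ssize_t try_transfer(const void *kbuf, size_t len, struct iov_iter *i)
{
	size_t done = copy_to_iter(kbuf, len, i);	/* advances i */

	if (done != len) {
		iov_iter_revert(i, done);	/* undo the partial copy */
		return -EFAULT;
	}
	return done;
}
#endif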
977 
978 /*
979  * Return the count of just the current iov_iter segment.
980  */
981 size_t iov_iter_single_seg_count(const struct iov_iter *i)
982 {
983 	if (unlikely(i->type & ITER_PIPE))
984 		return i->count;	// it is a silly place, anyway
985 	if (i->nr_segs == 1)
986 		return i->count;
987 	else if (i->type & ITER_BVEC)
988 		return min(i->count, i->bvec->bv_len - i->iov_offset);
989 	else
990 		return min(i->count, i->iov->iov_len - i->iov_offset);
991 }
992 EXPORT_SYMBOL(iov_iter_single_seg_count);
993 
994 void iov_iter_kvec(struct iov_iter *i, int direction,
995 			const struct kvec *kvec, unsigned long nr_segs,
996 			size_t count)
997 {
998 	BUG_ON(!(direction & ITER_KVEC));
999 	i->type = direction;
1000 	i->kvec = kvec;
1001 	i->nr_segs = nr_segs;
1002 	i->iov_offset = 0;
1003 	i->count = count;
1004 }
1005 EXPORT_SYMBOL(iov_iter_kvec);
1006 
1007 void iov_iter_bvec(struct iov_iter *i, int direction,
1008 			const struct bio_vec *bvec, unsigned long nr_segs,
1009 			size_t count)
1010 {
1011 	BUG_ON(!(direction & ITER_BVEC));
1012 	i->type = direction;
1013 	i->bvec = bvec;
1014 	i->nr_segs = nr_segs;
1015 	i->iov_offset = 0;
1016 	i->count = count;
1017 }
1018 EXPORT_SYMBOL(iov_iter_bvec);
1019 
1020 void iov_iter_pipe(struct iov_iter *i, int direction,
1021 			struct pipe_inode_info *pipe,
1022 			size_t count)
1023 {
1024 	BUG_ON(direction != ITER_PIPE);
1025 	WARN_ON(pipe->nrbufs == pipe->buffers);
1026 	i->type = direction;
1027 	i->pipe = pipe;
1028 	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1029 	i->iov_offset = 0;
1030 	i->count = count;
1031 	i->start_idx = i->idx;
1032 }
1033 EXPORT_SYMBOL(iov_iter_pipe);
1034 
1035 unsigned long iov_iter_alignment(const struct iov_iter *i)
1036 {
1037 	unsigned long res = 0;
1038 	size_t size = i->count;
1039 
1040 	if (unlikely(i->type & ITER_PIPE)) {
1041 		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
1042 			return size | i->iov_offset;
1043 		return size;
1044 	}
1045 	iterate_all_kinds(i, size, v,
1046 		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
1047 		res |= v.bv_offset | v.bv_len,
1048 		res |= (unsigned long)v.iov_base | v.iov_len
1049 	)
1050 	return res;
1051 }
1052 EXPORT_SYMBOL(iov_iter_alignment);
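
/*
 * The result ORs together every segment's address and length, so one test
 * against a mask checks all segments at once.  Direct I/O paths use it
 * roughly like this (sketch, compiled out; blkbits is an assumed device
 * block-size shift):
 */
#if 0
static int check_dio_alignment(const struct iov_iter *iter, unsigned blkbits)
{
	unsigned long mask = (1UL << blkbits) - 1;

	if (iov_iter_alignment(iter) & mask)
		return -EINVAL;	/* some segment is not block-aligned */
	return 0;
}
#endif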
1053 
1054 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1055 {
1056 	unsigned long res = 0;
1057 	size_t size = i->count;
1058 
1059 	if (unlikely(i->type & ITER_PIPE)) {
1060 		WARN_ON(1);
1061 		return ~0U;
1062 	}
1063 
1064 	iterate_all_kinds(i, size, v,
1065 		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
1066 			(size != v.iov_len ? size : 0), 0),
1067 		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1068 			(size != v.bv_len ? size : 0)),
1069 		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
1070 			(size != v.iov_len ? size : 0))
1071 		);
1072 	return res;
1073 }
1074 EXPORT_SYMBOL(iov_iter_gap_alignment);
1075 
1076 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1077 				size_t maxsize,
1078 				struct page **pages,
1079 				int idx,
1080 				size_t *start)
1081 {
1082 	struct pipe_inode_info *pipe = i->pipe;
1083 	ssize_t n = push_pipe(i, maxsize, &idx, start);
1084 	if (!n)
1085 		return -EFAULT;
1086 
1087 	maxsize = n;
1088 	n += *start;
1089 	while (n > 0) {
1090 		get_page(*pages++ = pipe->bufs[idx].page);
1091 		idx = next_idx(idx, pipe);
1092 		n -= PAGE_SIZE;
1093 	}
1094 
1095 	return maxsize;
1096 }
1097 
1098 static ssize_t pipe_get_pages(struct iov_iter *i,
1099 		   struct page **pages, size_t maxsize, unsigned maxpages,
1100 		   size_t *start)
1101 {
1102 	unsigned npages;
1103 	size_t capacity;
1104 	int idx;
1105 
1106 	if (!maxsize)
1107 		return 0;
1108 
1109 	if (!sanity(i))
1110 		return -EFAULT;
1111 
1112 	data_start(i, &idx, start);
1113 	/* some of this one + all after this one */
1114 	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
1115 	capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1116 
1117 	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
1118 }
1119 
1120 ssize_t iov_iter_get_pages(struct iov_iter *i,
1121 		   struct page **pages, size_t maxsize, unsigned maxpages,
1122 		   size_t *start)
1123 {
1124 	if (maxsize > i->count)
1125 		maxsize = i->count;
1126 
1127 	if (unlikely(i->type & ITER_PIPE))
1128 		return pipe_get_pages(i, pages, maxsize, maxpages, start);
1129 	iterate_all_kinds(i, maxsize, v, ({
1130 		unsigned long addr = (unsigned long)v.iov_base;
1131 		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1132 		int n;
1133 		int res;
1134 
1135 		if (len > maxpages * PAGE_SIZE)
1136 			len = maxpages * PAGE_SIZE;
1137 		addr &= ~(PAGE_SIZE - 1);
1138 		n = DIV_ROUND_UP(len, PAGE_SIZE);
1139 		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
1140 		if (unlikely(res < 0))
1141 			return res;
1142 		return (res == n ? len : res * PAGE_SIZE) - *start;
1143 	0;}),({
1144 		/* can't be more than PAGE_SIZE */
1145 		*start = v.bv_offset;
1146 		get_page(*pages = v.bv_page);
1147 		return v.bv_len;
1148 	}),({
1149 		return -EFAULT;
1150 	})
1151 	)
1152 	return 0;
1153 }
1154 EXPORT_SYMBOL(iov_iter_get_pages);
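
/*
 * On success each returned page holds an extra reference and the data
 * starts 'start' bytes into the first page; the caller must drop the
 * references with put_page().  Sketch (hypothetical, compiled out):
 */
#if 0
static ssize_t pin_first_pages(struct iov_iter *i)
{
	struct page *pages[16];
	size_t start;
	ssize_t bytes;
	int k, npages;

	bytes = iov_iter_get_pages(i, pages, 16 * PAGE_SIZE, 16, &start);
	if (bytes <= 0)
		return bytes;
	npages = DIV_ROUND_UP(bytes + start, PAGE_SIZE);
	/* ... read or write the pinned pages here ... */
	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	return bytes;
}
#endif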
1155 
1156 static struct page **get_pages_array(size_t n)
1157 {
1158 	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1159 }
1160 
1161 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1162 		   struct page ***pages, size_t maxsize,
1163 		   size_t *start)
1164 {
1165 	struct page **p;
1166 	ssize_t n;
1167 	int idx;
1168 	int npages;
1169 
1170 	if (!maxsize)
1171 		return 0;
1172 
1173 	if (!sanity(i))
1174 		return -EFAULT;
1175 
1176 	data_start(i, &idx, start);
1177 	/* some of this one + all after this one */
1178 	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
1179 	n = npages * PAGE_SIZE - *start;
1180 	if (maxsize > n)
1181 		maxsize = n;
1182 	else
1183 		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1184 	p = get_pages_array(npages);
1185 	if (!p)
1186 		return -ENOMEM;
1187 	n = __pipe_get_pages(i, maxsize, p, idx, start);
1188 	if (n > 0)
1189 		*pages = p;
1190 	else
1191 		kvfree(p);
1192 	return n;
1193 }
1194 
1195 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1196 		   struct page ***pages, size_t maxsize,
1197 		   size_t *start)
1198 {
1199 	struct page **p;
1200 
1201 	if (maxsize > i->count)
1202 		maxsize = i->count;
1203 
1204 	if (unlikely(i->type & ITER_PIPE))
1205 		return pipe_get_pages_alloc(i, pages, maxsize, start);
1206 	iterate_all_kinds(i, maxsize, v, ({
1207 		unsigned long addr = (unsigned long)v.iov_base;
1208 		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1209 		int n;
1210 		int res;
1211 
1212 		addr &= ~(PAGE_SIZE - 1);
1213 		n = DIV_ROUND_UP(len, PAGE_SIZE);
1214 		p = get_pages_array(n);
1215 		if (!p)
1216 			return -ENOMEM;
1217 		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
1218 		if (unlikely(res < 0)) {
1219 			kvfree(p);
1220 			return res;
1221 		}
1222 		*pages = p;
1223 		return (res == n ? len : res * PAGE_SIZE) - *start;
1224 	0;}),({
1225 		/* can't be more than PAGE_SIZE */
1226 		*start = v.bv_offset;
1227 		*pages = p = get_pages_array(1);
1228 		if (!p)
1229 			return -ENOMEM;
1230 		get_page(*p = v.bv_page);
1231 		return v.bv_len;
1232 	}),({
1233 		return -EFAULT;
1234 	})
1235 	)
1236 	return 0;
1237 }
1238 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1239 
1240 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1241 			       struct iov_iter *i)
1242 {
1243 	char *to = addr;
1244 	__wsum sum, next;
1245 	size_t off = 0;
1246 	sum = *csum;
1247 	if (unlikely(i->type & ITER_PIPE)) {
1248 		WARN_ON(1);
1249 		return 0;
1250 	}
1251 	iterate_and_advance(i, bytes, v, ({
1252 		int err = 0;
1253 		next = csum_and_copy_from_user(v.iov_base,
1254 					       (to += v.iov_len) - v.iov_len,
1255 					       v.iov_len, 0, &err);
1256 		if (!err) {
1257 			sum = csum_block_add(sum, next, off);
1258 			off += v.iov_len;
1259 		}
1260 		err ? v.iov_len : 0;
1261 	}), ({
1262 		char *p = kmap_atomic(v.bv_page);
1263 		next = csum_partial_copy_nocheck(p + v.bv_offset,
1264 						 (to += v.bv_len) - v.bv_len,
1265 						 v.bv_len, 0);
1266 		kunmap_atomic(p);
1267 		sum = csum_block_add(sum, next, off);
1268 		off += v.bv_len;
1269 	}),({
1270 		next = csum_partial_copy_nocheck(v.iov_base,
1271 						 (to += v.iov_len) - v.iov_len,
1272 						 v.iov_len, 0);
1273 		sum = csum_block_add(sum, next, off);
1274 		off += v.iov_len;
1275 	})
1276 	)
1277 	*csum = sum;
1278 	return bytes;
1279 }
1280 EXPORT_SYMBOL(csum_and_copy_from_iter);
1281 
1282 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1283 			       struct iov_iter *i)
1284 {
1285 	char *to = addr;
1286 	__wsum sum, next;
1287 	size_t off = 0;
1288 	sum = *csum;
1289 	if (unlikely(i->type & ITER_PIPE)) {
1290 		WARN_ON(1);
1291 		return false;
1292 	}
1293 	if (unlikely(i->count < bytes))
1294 		return false;
1295 	iterate_all_kinds(i, bytes, v, ({
1296 		int err = 0;
1297 		next = csum_and_copy_from_user(v.iov_base,
1298 					       (to += v.iov_len) - v.iov_len,
1299 					       v.iov_len, 0, &err);
1300 		if (err)
1301 			return false;
1302 		sum = csum_block_add(sum, next, off);
1303 		off += v.iov_len;
1304 		0;
1305 	}), ({
1306 		char *p = kmap_atomic(v.bv_page);
1307 		next = csum_partial_copy_nocheck(p + v.bv_offset,
1308 						 (to += v.bv_len) - v.bv_len,
1309 						 v.bv_len, 0);
1310 		kunmap_atomic(p);
1311 		sum = csum_block_add(sum, next, off);
1312 		off += v.bv_len;
1313 	}),({
1314 		next = csum_partial_copy_nocheck(v.iov_base,
1315 						 (to += v.iov_len) - v.iov_len,
1316 						 v.iov_len, 0);
1317 		sum = csum_block_add(sum, next, off);
1318 		off += v.iov_len;
1319 	})
1320 	)
1321 	*csum = sum;
1322 	iov_iter_advance(i, bytes);
1323 	return true;
1324 }
1325 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1326 
1327 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
1328 			     struct iov_iter *i)
1329 {
1330 	const char *from = addr;
1331 	__wsum sum, next;
1332 	size_t off = 0;
1333 	sum = *csum;
1334 	if (unlikely(i->type & ITER_PIPE)) {
1335 		WARN_ON(1);	/* for now */
1336 		return 0;
1337 	}
1338 	iterate_and_advance(i, bytes, v, ({
1339 		int err = 0;
1340 		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1341 					     v.iov_base,
1342 					     v.iov_len, 0, &err);
1343 		if (!err) {
1344 			sum = csum_block_add(sum, next, off);
1345 			off += v.iov_len;
1346 		}
1347 		err ? v.iov_len : 0;
1348 	}), ({
1349 		char *p = kmap_atomic(v.bv_page);
1350 		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
1351 						 p + v.bv_offset,
1352 						 v.bv_len, 0);
1353 		kunmap_atomic(p);
1354 		sum = csum_block_add(sum, next, off);
1355 		off += v.bv_len;
1356 	}),({
1357 		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
1358 						 v.iov_base,
1359 						 v.iov_len, 0);
1360 		sum = csum_block_add(sum, next, off);
1361 		off += v.iov_len;
1362 	})
1363 	)
1364 	*csum = sum;
1365 	return bytes;
1366 }
1367 EXPORT_SYMBOL(csum_and_copy_to_iter);
1368 
1369 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1370 {
1371 	size_t size = i->count;
1372 	int npages = 0;
1373 
1374 	if (!size)
1375 		return 0;
1376 
1377 	if (unlikely(i->type & ITER_PIPE)) {
1378 		struct pipe_inode_info *pipe = i->pipe;
1379 		size_t off;
1380 		int idx;
1381 
1382 		if (!sanity(i))
1383 			return 0;
1384 
1385 		data_start(i, &idx, &off);
1386 		/* some of this one + all after this one */
1387 		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
1388 		if (npages >= maxpages)
1389 			return maxpages;
1390 	} else iterate_all_kinds(i, size, v, ({
1391 		unsigned long p = (unsigned long)v.iov_base;
1392 		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1393 			- p / PAGE_SIZE;
1394 		if (npages >= maxpages)
1395 			return maxpages;
1396 	0;}),({
1397 		npages++;
1398 		if (npages >= maxpages)
1399 			return maxpages;
1400 	}),({
1401 		unsigned long p = (unsigned long)v.iov_base;
1402 		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1403 			- p / PAGE_SIZE;
1404 		if (npages >= maxpages)
1405 			return maxpages;
1406 	})
1407 	)
1408 	return npages;
1409 }
1410 EXPORT_SYMBOL(iov_iter_npages);
1411 
1412 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1413 {
1414 	*new = *old;
1415 	if (unlikely(new->type & ITER_PIPE)) {
1416 		WARN_ON(1);
1417 		return NULL;
1418 	}
1419 	if (new->type & ITER_BVEC)
1420 		return new->bvec = kmemdup(new->bvec,
1421 				    new->nr_segs * sizeof(struct bio_vec),
1422 				    flags);
1423 	else
1424 		/* iovec and kvec have identical layout */
1425 		return new->iov = kmemdup(new->iov,
1426 				   new->nr_segs * sizeof(struct iovec),
1427 				   flags);
1428 }
1429 EXPORT_SYMBOL(dup_iter);
1430 
1431 /**
1432  * import_iovec() - Copy an array of &struct iovec from userspace
1433  *     into the kernel, check that it is valid, and initialize a new
1434  *     &struct iov_iter iterator to access it.
1435  *
1436  * @type: One of %READ or %WRITE.
1437  * @uvector: Pointer to the userspace array.
1438  * @nr_segs: Number of elements in userspace array.
1439  * @fast_segs: Number of elements in @iov.
1440  * @iov: (input and output parameter) Pointer to pointer to (usually small
1441  *     on-stack) kernel array.
1442  * @i: Pointer to iterator that will be initialized on success.
1443  *
1444  * If the array pointed to by *@iov is large enough to hold all @nr_segs,
1445  * then this function places %NULL in *@iov on return. Otherwise, a new
1446  * array will be allocated and the result placed in *@iov. This means that
1447  * the caller may call kfree() on *@iov regardless of whether the small
1448  * on-stack array was used or not (and regardless of whether this function
1449  * returns an error or not).
1450  *
1451  * Return: 0 on success or negative error code on error.
1452  */
1453 int import_iovec(int type, const struct iovec __user * uvector,
1454 		 unsigned nr_segs, unsigned fast_segs,
1455 		 struct iovec **iov, struct iov_iter *i)
1456 {
1457 	ssize_t n;
1458 	struct iovec *p;
1459 	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1460 				  *iov, &p);
1461 	if (n < 0) {
1462 		if (p != *iov)
1463 			kfree(p);
1464 		*iov = NULL;
1465 		return n;
1466 	}
1467 	iov_iter_init(i, type, p, nr_segs, n);
1468 	*iov = p == *iov ? NULL : p;
1469 	return 0;
1470 }
1471 EXPORT_SYMBOL(import_iovec);
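
/*
 * Sketch of the usual syscall-side pattern (hypothetical, compiled out):
 * the on-stack fast array avoids an allocation for small vectors, and
 * kfree(iov) is always safe because *iov comes back NULL when the stack
 * array was used.
 */
#if 0
static ssize_t do_vectored_read(const struct iovec __user *uvec,
				unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... fill the iterator, e.g. with copy_to_iter() ... */
	kfree(iov);
	return ret;
}
#endif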
1472 
1473 #ifdef CONFIG_COMPAT
1474 #include <linux/compat.h>
1475 
1476 int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
1477 		 unsigned nr_segs, unsigned fast_segs,
1478 		 struct iovec **iov, struct iov_iter *i)
1479 {
1480 	ssize_t n;
1481 	struct iovec *p;
1482 	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1483 				  *iov, &p);
1484 	if (n < 0) {
1485 		if (p != *iov)
1486 			kfree(p);
1487 		*iov = NULL;
1488 		return n;
1489 	}
1490 	iov_iter_init(i, type, p, nr_segs, n);
1491 	*iov = p == *iov ? NULL : p;
1492 	return 0;
1493 }
1494 #endif
1495 
1496 int import_single_range(int rw, void __user *buf, size_t len,
1497 		 struct iovec *iov, struct iov_iter *i)
1498 {
1499 	if (len > MAX_RW_COUNT)
1500 		len = MAX_RW_COUNT;
1501 	if (unlikely(!access_ok(!rw, buf, len)))
1502 		return -EFAULT;
1503 
1504 	iov->iov_base = buf;
1505 	iov->iov_len = len;
1506 	iov_iter_init(i, rw, iov, 1, len);
1507 	return 0;
1508 }
1509 EXPORT_SYMBOL(import_single_range);
1510 
1511 int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
1512 			    int (*f)(struct kvec *vec, void *context),
1513 			    void *context)
1514 {
1515 	struct kvec w;
1516 	int err = -EINVAL;
1517 	if (!bytes)
1518 		return 0;
1519 
1520 	iterate_all_kinds(i, bytes, v, -EINVAL, ({
1521 		w.iov_base = kmap(v.bv_page) + v.bv_offset;
1522 		w.iov_len = v.bv_len;
1523 		err = f(&w, context);
1524 		kunmap(v.bv_page);
1525 		err;}), ({
1526 		w = v;
1527 		err = f(&w, context);})
1528 	)
1529 	return err;
1530 }
1531 EXPORT_SYMBOL(iov_iter_for_each_range);
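
/*
 * The user-space (iovec) case yields -EINVAL: the callback only ever sees
 * kernel-addressable kvecs, with bvec pages temporarily kmap()ed.  Sketch
 * of a caller (hypothetical, compiled out):
 */
#if 0
static int count_range_cb(struct kvec *vec, void *context)
{
	size_t *total = context;

	*total += vec->iov_len;
	return 0;	/* the last callback's return value is reported */
}

static int count_kernel_bytes(struct iov_iter *i, size_t bytes, size_t *total)
{
	return iov_iter_for_each_range(i, bytes, count_range_cb, total);
}
#endif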
1532