/* xref: /openbmc/linux/lib/iov_iter.c (revision 83268fa6) */
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

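/*
 * A sketch of how the iterate_* macros above are meant to be invoked
 * (illustrative only, not code from this file).  The caller passes one
 * STEP expression per segment type, each seeing "v" as the current
 * segment; the hypothetical helpers do_user(), do_page() and
 * do_kernel() stand in for real per-type work:
 *
 *	iterate_all_kinds(i, n, v,
 *		do_user(v.iov_base, v.iov_len),			// ITER_IOVEC
 *		do_page(v.bv_page, v.bv_offset, v.bv_len),	// ITER_BVEC
 *		do_kernel(v.iov_base, v.iov_len)		// ITER_KVEC
 *	)
 *
 * iterate_and_advance() runs the same dispatch but also consumes the
 * processed range, updating i->count, i->iov_offset and the segment
 * pointers; ITER_DISCARD runs no STEP at all.
 */
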
static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(VERIFY_READ, from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

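/*
 * The usual pattern around iov_iter_fault_in_readable() (a sketch of
 * what generic write paths do, not code from this file): pre-fault the
 * user pages while it is still safe to sleep, then do the copy with
 * pagefaults disabled and retry on a short copy:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
 *		return -EFAULT;
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */
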
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

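/*
 * Illustrative sketch (not from this file): wrapping a single user
 * buffer for a read-style transfer; "ubuf" and "len" are assumed to be
 * caller-supplied:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */
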
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

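/*
 * Sketch of a typical consumer (assumed, not from this file): a
 * ->read_iter() style path pushing a kernel buffer "kbuf" out through
 * the copy_to_iter() wrapper and checking for a short copy:
 *
 *	size_t copied = copy_to_iter(kbuf, len, iter);
 *	if (copied != len)
 *		return copied ? copied : -EFAULT;
 */
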
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = copy_to_user_mcsafe((__force void *) to, from, n);
	}
	return n;
}

static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = memcpy_mcsafe(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off, xfer = 0;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
				chunk);
		i->idx = idx;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
	}
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * Otherwise, the main differences between this and the typical
 * _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		({
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
				(from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
				v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

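/*
 * Sketch (assumed, not from this file): a write path pulling data from
 * the iterator into a kernel buffer.  copy_from_iter_full(), built on
 * _copy_from_iter_full() below, is the all-or-nothing variant:
 *
 *	if (!copy_from_iter_full(kbuf, len, iter))
 *		return -EFAULT;
 */
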
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head = compound_head(page);
	size_t v = n + offset + page_address(page) - page_address(head);

	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (unlikely(iov_iter_is_discard(i)))
		return bytes;
	else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

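/*
 * Sketch (assumed, not from this file): a read path handing page-cache
 * pages to the caller one chunk at a time, stopping when the target
 * faults or the pipe fills up:
 *
 *	size_t n = copy_page_to_iter(page, offset, count, iter);
 *	if (n < count)
 *		break;
 */
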
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

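/*
 * Sketch (assumed, not from this file): satisfying a read over a hole
 * by zero-filling the destination instead of copying real data:
 *
 *	size_t cleared = iov_iter_zero(min_t(size_t, len,
 *					     iov_iter_count(iter)), iter);
 */
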
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

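/*
 * How advance and revert pair up in retrying callers (sketch, not code
 * from this file): consume what was handed to the lower layer, then
 * give the unconsumed tail back if that layer reports a partial
 * transfer; "done" and "written" are assumed caller-tracked counts:
 *
 *	iov_iter_advance(i, done);
 *	...
 *	if (written < done)
 *		iov_iter_revert(i, done - written);
 */
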
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	if (unlikely(iov_iter_is_discard(i)))
		return i->count;
	else if (iov_iter_is_bvec(i))
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

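/*
 * Sketch (assumed, not from this file): pointing an iterator at a
 * kernel buffer, as kernel_read()/kernel_write() style helpers do:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 */
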
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);

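/*
 * Sketch (assumed, not from this file): skipping "to_skip" bytes of a
 * stream by reading them into a discard iterator, so nothing is ever
 * copied anywhere:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, to_skip);
 *	// hand &iter to the ->read_iter() / recvmsg-style consumer
 */
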
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

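/*
 * Sketch (assumed, not from this file): pinning the next run of user
 * pages for direct I/O.  "pages", "maxsize" and "maxpages" are assumed
 * to be sized by the caller:
 *
 *	size_t off;
 *	ssize_t got = iov_iter_get_pages(i, pages, maxsize, maxpages, &off);
 *
 *	if (got <= 0)
 *		return got ? got : -EFAULT;
 *	// "got" bytes start at offset "off" into pages[0];
 *	// put_page() each page once the I/O completes.
 */
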
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	ssize_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

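/*
 * Sketch of a readv()-style caller of import_iovec() (assumed, not
 * code from this file).  Per the comment above, *iov may always be
 * passed to kfree(), whichever array ended up being used:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... perform the I/O on &iter ...
 *	kfree(iov);
 */
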
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);
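/*
 * Sketch of a callback for iov_iter_for_each_range() (assumed, not
 * code from this file).  bvec segments arrive kmap()ed and kvec
 * segments are passed through, so the callback only ever sees kernel
 * addresses; user-backed (ITER_IOVEC) iterators yield -EINVAL instead:
 *
 *	static int count_bytes(struct kvec *vec, void *context)
 *	{
 *		*(size_t *)context += vec->iov_len;
 *		return 0;	// non-zero stops the walk
 *	}
 */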