/* lib/iov_iter.c (openbmc/linux, revision 0985b65d) */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

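/*
 * iterate_iovec/iterate_kvec/iterate_bvec walk one segment array, handing
 * each (base, len) chunk to the STEP expression.  Only the iovec variant
 * lets STEP report a short copy: STEP evaluates to the number of bytes it
 * did NOT process, and a non-zero result stops the walk early.  The kvec
 * and bvec variants assume STEP always completes.  On exit, n holds the
 * number of bytes covered (iovec) or the requested size (kvec/bvec), and
 * skip is the offset into the final segment reached.
 */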
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

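/*
 * iterate_all_kinds() dispatches on the iterator type and runs the matching
 * step expression (I for user iovecs, B for bio_vecs, K for kernel kvecs)
 * without touching the iterator itself.  iterate_and_advance() below does
 * the same walk but also consumes the data: it clamps n to i->count and
 * then updates iov/kvec/bvec, nr_segs, iov_offset and count to reflect the
 * bytes actually processed.
 */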
#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		const struct bio_vec *bvec;			\
		struct bio_vec v;				\
		iterate_bvec(i, n, v, bvec, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec;		\
			struct bio_vec v;			\
			iterate_bvec(i, n, v, bvec, skip, (B))	\
			if (skip == bvec->bv_len) {		\
				bvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

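/*
 * Slow path for user-space iterators: copy from a page into the iovec
 * segments.  Prefault the first chunk so the copy can run under
 * kmap_atomic(); if a fault happens anyway (or prefaulting fails), fall
 * back to plain kmap() and the faulting __copy_to_user().  The iterator is
 * advanced by the number of bytes actually copied.
 */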
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

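/*
 * Mirror of copy_page_to_iter_iovec(): copy from the iovec segments into a
 * page, using the same prefault + kmap_atomic() fast path with a kmap()
 * fallback.
 */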
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
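/*
 * Illustrative caller pattern (a sketch, not code from this file): write
 * paths that copy user data while holding page locks typically prefault
 * first, so that the later atomic copy is unlikely to come up short:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	... lock/prepare the destination page ...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	iov_iter_advance(i, copied);
 */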

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
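/*
 * Illustrative sketch of wiring up an iterator over a user-supplied iovec
 * array (hypothetical names "uvec", "nsegs", "kbuf" and "total_len"):
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, uvec, nsegs, total_len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 *
 * Under KERNEL_DS the same call silently builds an ITER_KVEC iterator,
 * relying on struct iovec and struct kvec having the same layout.
 */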

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

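/*
 * Copy from a linear kernel buffer into the iterator, advancing it.
 * Returns the number of bytes actually copied: at most i->count, and less
 * than that only if a user-space segment faults part way through.
 * copy_from_iter() below is the mirror operation.
 */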
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

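/*
 * Like copy_from_iter(), but uses __copy_from_user_nocache() for user
 * segments so the copy avoids polluting the CPU cache where the
 * architecture supports that; kvec and bvec segments still go through
 * plain memcpy().
 */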
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

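/*
 * Copy part of a page into the iterator.  Kernel-backed iterators
 * (ITER_BVEC/ITER_KVEC) can take the page through kmap_atomic() and the
 * ordinary copy_to_iter() path; user-space iterators need the faulting
 * slow path above.  copy_page_from_iter() is the opposite direction.
 */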
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

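/*
 * Copy iterator data into a page with the page mapped atomically.  Unlike
 * copy_from_iter(), this does not advance the iterator; the caller checks
 * the return value for a short copy and calls iov_iter_advance() itself.
 */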
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

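/*
 * Initializers for the kernel-backed iterator flavours.  Unlike
 * iov_iter_init(), the caller passes the ITER_KVEC/ITER_BVEC flag
 * explicitly as part of the direction.
 */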
void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

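/*
 * OR together the start address/offset and length of every segment that
 * the remaining count covers; the low bits of the result give the worst
 * alignment in the iterator, which direct I/O paths typically check
 * against the device's alignment requirements.
 */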
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

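/*
 * Like iov_iter_alignment(), but reports misalignment at the internal
 * boundaries between segments, which matters for drivers that cannot
 * handle a gap in the middle of an I/O.
 */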
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

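/*
 * Pin the user pages backing the first segment (or grab a reference on the
 * bvec page) and report how many bytes, starting at *start within the
 * first page, the returned pages cover.  Kernel kvecs have no struct page
 * to hand out, so they fail with -EFAULT.
 */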
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

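/*
 * Copy out of the iterator while folding the data into an Internet
 * checksum, as used by the networking code.  Per-chunk checksums are
 * combined with csum_block_add() so the running offset is accounted for;
 * a faulting user segment ends the copy short.
 */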
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

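/*
 * Count how many pages the remaining data spans, capped at maxpages.  Each
 * bvec segment counts as one page; iovec and kvec segments are rounded to
 * the page boundaries they touch.
 */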
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

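/*
 * Validate and import a user-supplied iovec array, allocating a kernel
 * copy if it does not fit in the caller's fast_segs-sized stack array, and
 * initialise the iov_iter over it.  On success *iov is either NULL (the
 * fast array was used) or the allocation the caller must kfree() later;
 * on failure everything is cleaned up and *iov is NULL.
 */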
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);