/* lib/iov_iter.c (revision 8b036556) */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

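/*
 * The iterate_* helpers below walk an iov_iter one segment at a time:
 * __v is loaded with the current segment, clamped against both n and
 * the space left in that segment, and STEP is evaluated on it.  For
 * userland iovecs, STEP yields the number of bytes it failed to copy,
 * so a fault cuts the walk short; kvec and bvec steps cannot fault
 * and their result is discarded.  On exit, n holds the number of
 * bytes actually processed.
 */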
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

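/*
 * Dispatch to the right iterate_* flavour for this iterator and run
 * the matching step (I for iovec, B for bvec, K for kvec), without
 * modifying the iterator itself.
 */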
#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		const struct bio_vec *bvec;			\
		struct bio_vec v;				\
		iterate_bvec(i, n, v, bvec, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}

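/*
 * Like iterate_all_kinds, but afterwards advance the iterator past
 * whatever was processed: count, iov_offset and the segment pointer
 * (with nr_segs) are all updated.
 */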
#define iterate_and_advance(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		const struct bio_vec *bvec;			\
		struct bio_vec v;				\
		iterate_bvec(i, n, v, bvec, skip, (B))		\
		if (skip == bvec->bv_len) {			\
			bvec++;					\
			skip = 0;				\
		}						\
		i->nr_segs -= bvec - i->bvec;			\
		i->bvec = bvec;					\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
		if (skip == kvec->iov_len) {			\
			kvec++;					\
			skip = 0;				\
		}						\
		i->nr_segs -= kvec - i->kvec;			\
		i->kvec = kvec;					\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
		if (skip == iov->iov_len) {			\
			iov++;					\
			skip = 0;				\
		}						\
		i->nr_segs -= iov - i->iov;			\
		i->iov = iov;					\
	}							\
	i->count -= n;						\
	i->iov_offset = skip;					\
}

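/*
 * Copy from a kernel page into a userland iovec.  The cheap path
 * pre-faults the destination and copies under kmap_atomic(); if that
 * is not possible, or the atomic copy comes up short, fall back to a
 * sleeping kmap() with plain __copy_to_user().  Returns the number of
 * bytes copied and advances the iterator past them.
 */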
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

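/*
 * The read-side mirror of copy_page_to_iter_iovec(): copy from a
 * userland iovec into a kernel page, again trying the atomic kmap
 * path before falling back to kmap().
 */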
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not
 * be accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

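/*
 * Initialize a userspace-backed iterator (silently promoted to a
 * kvec-backed one when the caller runs under KERNEL_DS).  direction
 * is READ or WRITE, and count is the total number of bytes covered
 * by the nr_segs iovecs.
 *
 * A minimal usage sketch (illustrative only; buf, len and kbuf are
 * assumed to be caller-provided):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 */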
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

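/*
 * Copy up to @bytes from the kernel buffer at @addr into the
 * iterator, advancing it.  Returns the number of bytes copied, which
 * may be short if the iterator runs dry or a userland segment faults.
 */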
size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *from = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

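/*
 * Copy up to @bytes from the iterator into the kernel buffer at
 * @addr, advancing it.  Returns the number of bytes copied.
 */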
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

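/*
 * As copy_from_iter(), but use a cache-bypassing copy for userland
 * segments where the architecture provides one.
 */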
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

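/*
 * Copy a page (or part of one) into the iterator.  bvec- and
 * kvec-backed iterators are fed through copy_to_iter() under
 * kmap_atomic(), which is safe because their steps never fault;
 * userland iovecs take the fault-aware path above.  The mirror
 * image, copy_page_from_iter(), follows below.
 */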
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

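/*
 * Zero up to @bytes of whatever the iterator points at, advancing
 * it.  Returns the number of bytes actually zeroed, which can be
 * short on a userland fault.
 */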
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

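/*
 * Copy from the iterator into a page while in atomic context, without
 * advancing the iterator.  Nothing here may fault, so callers are
 * expected to have faulted the user pages in beforehand (see
 * iov_iter_fault_in_readable()) and must cope with a short return if
 * the pages went away in the meantime.
 */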
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

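/*
 * Advance the iterator by @size bytes, simply by running the no-op
 * step across the affected segments.
 */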
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

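/*
 * Initialize an explicitly kvec-backed iterator; direction must carry
 * ITER_KVEC.  iov_iter_bvec() below is the bio_vec counterpart.
 */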
void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

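/*
 * OR together the base address (or page offset) and length of every
 * segment the remaining count covers.  The result is the alignment
 * mask of the iterator, used e.g. by direct I/O code to verify that
 * a request meets the device's alignment constraints.
 */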
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

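/*
 * Pin user pages covering the start of the iterator into @pages and
 * store the offset into the first page in *@start.  Only the first
 * segment is examined per call, so the returned length may be less
 * than @maxsize; a bvec-backed iterator hands back a reference to its
 * current page, and kvec-backed iterators are not supported here
 * (-EFAULT).  The iterator is not advanced.
 */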
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

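/*
 * As iov_iter_get_pages(), except the pages array is allocated here
 * (kmalloc with a vmalloc fallback, see get_pages_array()) and handed
 * to the caller, who frees it with kvfree().
 */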
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

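/*
 * Copy from the iterator into @addr, folding an Internet checksum of
 * the data into *@csum as we go.  A userland fault stops both the
 * copy and the iterator at the failing segment.
 */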
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

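/*
 * The mirror of csum_and_copy_from_iter(): copy @addr out to the
 * iterator, checksumming the data on the way.
 */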
size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

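/*
 * Return the number of pages spanned by the remaining bytes of the
 * iterator, capped at @maxpages.
 */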
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

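/*
 * Duplicate an iterator, deep-copying its segment array with
 * kmemdup().  Returns the new array (owned by the caller) or NULL on
 * allocation failure.
 */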
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);
769