/* xref: /openbmc/linux/lib/iov_iter.c (revision bc5aa3a0) */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

/*
 * Walk up to n bytes of a user-space iovec array.  STEP is an expression
 * that processes one segment (__v) and evaluates to the number of bytes
 * it could NOT handle (e.g. the return of a faulting copy).  On exit, n
 * holds the number of bytes actually processed.
 */
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

/*
 * Same walk over kernel-space kvecs.  Kernel copies cannot fault half-way,
 * so STEP's result is discarded and every segment is consumed in full.
 */
#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

/* Walk page-based bio_vec segments via a local bvec iterator. */
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

/*
 * Dispatch on the iterator flavour: I runs for user iovecs, B for bvecs,
 * K for kvecs.  The walk itself does not modify the iov_iter.
 */
#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		struct bio_vec v;				\
		struct bvec_iter __bi;				\
		iterate_bvec(i, n, v, __bi, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}

/*
 * Like iterate_all_kinds(), but also consumes what was processed:
 * count, nr_segs, the segment pointer and iov_offset are all advanced.
 */
#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

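/*
 * Illustrative sketch, not part of the original file: a minimal user of
 * iterate_all_kinds().  The iovec expression must evaluate to the number
 * of bytes it failed to process (0 here); the bvec/kvec expressions may
 * evaluate to anything.  example_span_length() is a hypothetical name.
 */
static size_t __maybe_unused example_span_length(const struct iov_iter *i)
{
	size_t size = i->count;
	size_t total = 0;

	if (!size)
		return 0;
	iterate_all_kinds(i, size, v,
		(total += v.iov_len, 0),	/* user-space iovec segment */
		total += v.bv_len,		/* page-based bvec segment */
		total += v.iov_len		/* kernel kvec segment */
	)
	return total;
}
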
/* Copy from a (possibly highmem) page into a user-backed iov_iter. */
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/* Copy from a user-backed iov_iter into a (possibly highmem) page. */
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
			0;
		}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

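/*
 * Illustrative sketch, not part of the original file: wrapping a single
 * user buffer in an iov_iter and filling it from a kernel buffer.  The
 * function and parameter names are hypothetical.
 */
static size_t __maybe_unused example_fill_user_buf(void __user *ubuf,
						   const void *kbuf,
						   size_t len)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter iter;

	/* READ: data flows towards the iterator's buffers */
	iov_iter_init(&iter, READ, &iov, 1, len);
	return copy_to_iter(kbuf, len, &iter);
}
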
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

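/*
 * Illustrative sketch, not part of the original file: describing a kernel
 * buffer with a kvec-backed iterator.  Note that this revision expects the
 * ITER_KVEC flag in the direction argument.  Names are hypothetical.
 */
static size_t __maybe_unused example_kvec_zero(void *kbuf, size_t len)
{
	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ | ITER_KVEC, &kv, 1, len);
	return iov_iter_zero(len, &iter);	/* memset()s the kvec */
}
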
void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

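/*
 * Illustrative sketch, not part of the original file: a one-page bvec
 * iterator, cleared through iov_iter_zero().  Names are hypothetical.
 */
static size_t __maybe_unused example_bvec_zero(struct page *page)
{
	struct bio_vec bv = {
		.bv_page	= page,
		.bv_len		= PAGE_SIZE,
		.bv_offset	= 0,
	};
	struct iov_iter iter;

	iov_iter_bvec(&iter, READ | ITER_BVEC, &bv, 1, PAGE_SIZE);
	return iov_iter_zero(PAGE_SIZE, &iter);
}
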
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

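/*
 * Illustrative sketch, not part of the original file: pinning the pages
 * behind the first segment of an iterator and releasing them again.  On
 * success, the return value of iov_iter_get_pages() is the number of
 * bytes covered and *start is the offset into the first page.  All names
 * here are hypothetical.
 */
static ssize_t __maybe_unused example_pin_pages(struct iov_iter *iter,
						size_t maxsize)
{
	struct page *pages[16];
	size_t start;
	ssize_t bytes;
	int n;

	bytes = iov_iter_get_pages(iter, pages, maxsize, 16, &start);
	if (bytes <= 0)
		return bytes;
	/* ... access the pinned pages here ... */
	for (n = 0; n < DIV_ROUND_UP(start + bytes, PAGE_SIZE); n++)
		put_page(pages[n]);
	return bytes;
}
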
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

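/*
 * Illustrative sketch, not part of the original file: copying data out of
 * an iterator while accumulating its checksum in one pass, as networking
 * receive paths do.  Seeding the running sum with 0 is an assumption of
 * this example; the names are hypothetical.
 */
static size_t __maybe_unused example_recv_csum(void *kbuf, size_t len,
					       struct iov_iter *from)
{
	__wsum csum = 0;
	size_t copied = csum_and_copy_from_iter(kbuf, len, &csum, from);

	/* csum now covers the copied bytes; fold with csum_fold() if needed */
	return copied;
}
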
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

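/*
 * Illustrative sketch, not part of the original file: using the page count
 * to size a pin array before calling iov_iter_get_pages().  The helper
 * name is hypothetical.
 */
static struct page ** __maybe_unused example_alloc_page_array(
		const struct iov_iter *iter, int *npages)
{
	*npages = iov_iter_npages(iter, INT_MAX);
	return kcalloc(*npages, sizeof(struct page *), GFP_KERNEL);
}
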
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

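/*
 * Illustrative sketch, not part of the original file: snapshotting an
 * iterator whose segment array the caller may free.  Name hypothetical.
 */
static int __maybe_unused example_snapshot_iter(struct iov_iter *dst,
						struct iov_iter *src)
{
	/* dup_iter() copies *src and duplicates its segment array */
	return dup_iter(dst, src, GFP_KERNEL) ? 0 : -ENOMEM;
}
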
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

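/*
 * Illustrative sketch, not part of the original file: the usual readv()
 * style call sequence around import_iovec().  Names are hypothetical.
 */
static ssize_t __maybe_unused example_import(const struct iovec __user *uvec,
					     unsigned nr_segs)
{
	struct iovec stack[UIO_FASTIOV], *iov = stack;
	struct iov_iter iter;
	ssize_t total;
	int ret;

	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;
	total = iter.count;
	/* ... perform the I/O against &iter here ... */
	kfree(iov);	/* NULL when the on-stack array was used */
	return total;
}
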
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
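
/*
 * Illustrative sketch, not part of the original file: validating a single
 * user buffer for a write-style operation.  The iovec must outlive the
 * iterator, which keeps a pointer to it.  Names are hypothetical.
 */
static int __maybe_unused example_single_range(void __user *buf, size_t len)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret;

	ret = import_single_range(WRITE, buf, len, &iov, &iter);
	if (ret)
		return ret;
	/* iter now describes one user segment of at most MAX_RW_COUNT bytes */
	return 0;
}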
829