/* xref: /openbmc/linux/lib/iov_iter.c (revision 2f8b5444) */
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

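/*
 * The iterate_* helpers below walk one segment type each.  STEP is an
 * expression evaluated once per non-empty segment, with __v describing
 * the current chunk.  For iterate_iovec (userspace memory) STEP must
 * yield the number of bytes it failed to process, so a short copy stops
 * the walk early; iterate_kvec and iterate_bvec cover kernel memory,
 * which cannot fault, so their STEP result is discarded.
 */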
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		struct bio_vec v;				\
		struct bvec_iter __bi;				\
		iterate_bvec(i, n, v, __bi, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}
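
/*
 * Illustrative sketch (not part of the original source): callers pass
 * one STEP expression per flavour, and iterate_and_advance() both walks
 * the data and updates the iterator state, as copy_to_iter() does below:
 *
 *	iterate_and_advance(i, bytes, v,
 *		__copy_to_user(v.iov_base, ..., v.iov_len),	// iovec
 *		memcpy_to_page(v.bv_page, v.bv_offset, ...),	// bvec
 *		memcpy(v.iov_base, ..., v.iov_len))		// kvec
 */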

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

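/*
 * sanity() spells out the pipe-iterator invariants: with a non-zero
 * iov_offset the iterator must sit at the end of the last occupied
 * buffer, and with a zero offset it must point at the first free slot
 * after it.
 */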
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

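/*
 * pipe->buffers is a power of two, so masking with (pipe->buffers - 1)
 * wraps the index around the ring; with 16 buffers, next_idx(15, pipe)
 * yields 0.
 */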
static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

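/*
 * push_pipe() extends the pipe by up to 'size' bytes, allocating fresh
 * pages as needed, and reports via *idxp/*offp where writing should
 * begin.  It returns the capacity actually made available, which may be
 * short if alloc_page() fails or the ring fills up.
 */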
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
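
/*
 * Note the '(from += v.iov_len) - v.iov_len' idiom above: each STEP
 * argument is expanded exactly once per segment, so the expression
 * advances the source cursor while still handing the copy routine the
 * chunk's starting address.
 */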

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	int idx = i->idx;
	size_t off = i->iov_offset;

	if (unlikely(i->count < size))
		size = i->count;

	if (size) {
		if (off) /* make it relative to the beginning of buffer */
			size += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (size <= buf->len)
				break;
			size -= buf->len;
			idx = next_idx(idx, pipe);
		}
		buf->len = size;
		i->idx = idx;
		off = i->iov_offset = buf->offset + size;
	}
	if (off)
		idx = next_idx(idx, pipe);
	if (pipe->nrbufs) {
		int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		/* [curbuf,unused) is in use.  Free [idx,unused) */
		while (idx != unused) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_pipe);
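
/*
 * Illustrative sketch (not part of the original source): filling a
 * single kernel buffer through the kvec flavour.  'buf', 'len' and
 * 'src' are hypothetical names.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ | ITER_KVEC, &kv, 1, len);
 *	copy_to_iter(src, len, &iter);
 */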

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
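
/*
 * The result ORs together every segment's base address and length, so a
 * caller can test it against a mask; e.g. 'iov_iter_alignment(i) & 511'
 * is non-zero iff some segment breaks 512-byte alignment.
 */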

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
	);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

/* ssize_t, not size_t: this can return -EFAULT, which must stay negative */
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
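
/*
 * Illustrative sketch (not part of the original source): pinning the
 * start of the memory an iterator describes.  The array size is an
 * arbitrary choice here.
 *
 *	struct page *pages[16];
 *	size_t start;
 *	ssize_t n = iov_iter_get_pages(i, pages, maxsize, 16, &start);
 *
 * On success n is the number of bytes covered, beginning 'start' bytes
 * into pages[0], and the caller must put_page() each returned page.
 */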

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	ssize_t n;	/* signed: __pipe_get_pages() may return -EFAULT */
	int idx;
	int npages;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
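
/*
 * csum_block_add() folds each chunk's checksum in at byte offset 'off',
 * which is why 'off' only advances for bytes that were actually summed;
 * an odd offset makes the helper byte-swap the chunk's sum before
 * adding it.
 */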

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in the userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs
 * entries, then this function places %NULL in *@iov on return. Otherwise,
 * a new array will be allocated and the result placed in *@iov. This means
 * that the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
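
/*
 * Illustrative sketch (not part of the original source): a typical
 * import_iovec() caller in a readv(2)-style path.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV,
 *			       &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	...use &iter...
 *	kfree(iov);	// NULL when the on-stack array was used, so safe
 */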

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
1222