/* xref: /openbmc/linux/lib/iov_iter.c (revision f3a8b664) */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		struct bio_vec v;				\
		struct bvec_iter __bi;				\
		iterate_bvec(i, n, v, __bi, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}
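
/*
 * Editor's note (usage sketch, not part of the original file): the I/B/K
 * arguments are expressions evaluated with v bound to the current segment.
 * The iovec step must evaluate to the number of bytes it did NOT process
 * (user copies can fail part-way); the kvec and bvec steps cannot fail and
 * their values are ignored.  A minimal hypothetical caller that just sums
 * segment lengths would look like:
 *
 *	size_t total = 0;
 *	iterate_all_kinds(i, n, v,
 *		(total += v.iov_len, 0),
 *		total += v.bv_len,
 *		total += v.iov_len
 *	)
 */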

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}
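
/*
 * Editor's note: pipe->buffers is always a power of two, so the mask above
 * gives cheap wrap-around; e.g. with 16 buffers, next_idx(15, pipe)
 * == (15 + 1) & 15 == 0.
 */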

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
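
/*
 * Editor's sketch (hypothetical caller, not from this file): write paths
 * fault the user pages in *before* taking page locks, so that the atomic
 * copy done under the lock cannot stall on a page fault:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	... lock the page, then iov_iter_copy_from_user_atomic() ...
 */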

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
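
/*
 * Editor's sketch, assuming a hypothetical user buffer @ubuf of @len bytes
 * that a read should land in:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	... hand &iter to ->read_iter() or copy_page_to_iter() ...
 */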

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
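
/*
 * Editor's sketch (hypothetical names): pushing a kernel buffer out to
 * whatever the iterator describes; a short return means a userspace page
 * was unwritable, or a pipe could not be extended:
 *
 *	size_t copied = copy_to_iter(kbuf, len, &iter);
 *	if (copied != len)
 *		return -EFAULT;
 */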

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
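
/*
 * Editor's sketch of a page-cache read loop handing each uptodate page
 * back to the caller (all variables hypothetical):
 *
 *	size_t n = copy_page_to_iter(page, offset, want, iter);
 *	written += n;
 *	if (n < want)
 *		break;
 *
 * where a short copy means a fault or, for pipes, no free slot.
 */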

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
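
/*
 * Editor's sketch of the classic pairing (cf. generic_perform_write()):
 * fault pages in without the page lock, take the lock, do the atomic copy,
 * and let the caller retry with a smaller chunk if it came up short:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	lock_page(page);
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	unlock_page(page);
 *	iov_iter_advance(i, copied);
 */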

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	int idx = i->idx;
	size_t off = i->iov_offset;

	if (unlikely(i->count < size))
		size = i->count;
	/* consume the advanced bytes now; size is reused as a cursor below */
	i->count -= size;

	if (size) {
		if (off) /* make it relative to the beginning of buffer */
			size += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (size <= buf->len)
				break;
			size -= buf->len;
			idx = next_idx(idx, pipe);
		}
		buf->len = size;
		i->idx = idx;
		off = i->iov_offset = buf->offset + size;
	}
	if (off)
		idx = next_idx(idx, pipe);
	if (pipe->nrbufs) {
		int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		/* [curbuf,unused) is in use.  Free [idx,unused) */
		while (idx != unused) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
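
/*
 * Editor's sketch: wrapping an existing page in a single-segment bvec
 * iterator (names hypothetical; note the direction must carry ITER_BVEC):
 *
 *	struct bio_vec bv = {
 *		.bv_page	= page,
 *		.bv_len		= len,
 *		.bv_offset	= 0,
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, ITER_BVEC | READ, &bv, 1, len);
 */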

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
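
/*
 * Editor's sketch: direct-I/O paths typically use this to reject buffers
 * the device cannot DMA to/from (blocksize_mask hypothetical):
 *
 *	if (iov_iter_alignment(iter) & blocksize_mask)
 *		return -EINVAL;
 */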

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
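
/*
 * Editor's sketch (hypothetical caller): pinning the first chunk of the
 * iterator; note that only the first segment is examined, and the caller
 * still has to iov_iter_advance() past whatever it consumed:
 *
 *	struct page *pages[1];
 *	size_t off;
 *	ssize_t n = iov_iter_get_pages(i, pages, PAGE_SIZE, 1, &off);
 *
 *	if (n <= 0)
 *		return n ? n : -EFAULT;
 *	... n bytes live in pages[0] starting at off ...
 *	put_page(pages[0]);
 */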

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
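
/*
 * Editor's sketch: the usual calling convention for a readv()-style
 * syscall (handler body hypothetical):
 *
 *	struct iovec iovstack[UIO_FASTIOV];
 *	struct iovec *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... use &iter, then ...
 *	kfree(iov);
 *
 * The unconditional kfree() is safe per the rules documented above.
 */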

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
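
/*
 * Editor's sketch: single-buffer syscalls can skip the userspace iovec
 * array entirely (handler body hypothetical):
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_single_range(WRITE, buf, len, &iov, &iter);
 *	if (ret)
 *		return ret;
 *	... hand &iter to ->write_iter() ...
 *
 * Note that @iov must stay live for as long as the iterator is in use.
 */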
1221