#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

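/*
 * The iterate_* macros above walk the iterator one segment at a time,
 * binding v to the current segment and evaluating the matching STEP
 * expression: I for user iovecs, B for bio_vecs, K for kvecs.  For the
 * user-iovec case, STEP evaluates to the number of bytes it failed to
 * process, so a copy that faults mid-segment stops the walk early;
 * kernel-memory steps cannot fail and their value is discarded.  A
 * sketch of a typical invocation (see _copy_to_iter() below for the
 * real thing):
 *
 *	iterate_and_advance(i, bytes, v,
 *		copyout(v.iov_base, ..., v.iov_len),	// user pointers
 *		memcpy_to_page(v.bv_page, ...),		// page vectors
 *		memcpy(v.iov_base, ...)			// kernel pointers
 *	)
 */
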
/* Returns the number of bytes NOT copied, like raw_copy_to_user() */
static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

/* Returns the number of bytes NOT copied, like raw_copy_from_user() */
static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(VERIFY_READ, from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

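/*
 * Illustrative use (a sketch of the classic buffered-write pattern, not
 * code from this file): prefault the user pages *before* taking page
 * locks, since faulting them in while the destination page is locked
 * could deadlock:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	// ... lock the pagecache page, then use the atomic copy below
 */
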
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

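/*
 * Example (sketch): wrapping a readv-style request; "iov", "nr" and
 * "total_len" are hypothetical locals, and the iovec array must stay
 * alive for as long as the iterator is used:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, iov, nr, total_len);
 *	// READ means data will flow *into* the user buffers
 */
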
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

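/*
 * Typical caller (sketch): a character device's ->read_iter() pushing a
 * kernel buffer out through whatever the iterator describes;
 * copy_to_iter() is the size-checking wrapper around this function (see
 * linux/uio.h), and "struct foo_dev" is a hypothetical driver structure:
 *
 *	static ssize_t foo_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct foo_dev *dev = iocb->ki_filp->private_data;
 *
 *		return copy_to_iter(dev->buf, dev->buf_len, to);
 *	}
 */
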
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

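/*
 * The _full variant is all-or-nothing: it only advances the iterator
 * when every byte was copied, so a fault leaves the iterator where it
 * was.  That suits fixed-size headers.  Sketch (copy_from_iter_full()
 * is the checking wrapper in linux/uio.h; "struct foo_header" is
 * hypothetical):
 *
 *	struct foo_header hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;
 */
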
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

/*
 * Sanity-check that @offset + @n stays within the (possibly compound)
 * page the caller handed us, so the kmap/memcpy helpers cannot run off
 * the end of it.
 */
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head = compound_head(page);
	size_t v = n + offset + page_address(page) - page_address(head);

	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

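/*
 * Sketch of the classic consumer: a filesystem read path handing a
 * pagecache page to whoever is reading, without caring whether the
 * destination is user iovecs, a kvec, a bio_vec array or a pipe:
 *
 *	copied = copy_page_to_iter(page, offset, this_chunk, to);
 *	if (copied < this_chunk)
 *		// fault or full pipe: stop early
 */
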
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

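/*
 * Together with iov_iter_fault_in_readable() above, this enables the
 * usual buffered-write loop (a paraphrase of generic_perform_write(),
 * not code from this file):
 *
 *	if (iov_iter_fault_in_readable(i, bytes))	// may sleep
 *		break;
 *	// ... ->write_begin() locks the page ...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	// ... ->write_end(), then iov_iter_advance(i, copied); if nothing
 *	// was copied, unlock and retry after faulting the pages in again.
 */
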
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

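/*
 * Sketch: after transferring @done bytes by some out-of-band means (DMA,
 * or a helper that does not update the iterator itself), keep the
 * iterator in sync before looking at what remains:
 *
 *	iov_iter_advance(i, done);
 *	if (iov_iter_count(i))
 *		// more segments left to service
 */
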
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (i->type & ITER_BVEC) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

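/*
 * Sketch: undoing an optimistic advance after a failed or short
 * operation, so the caller can fall back to another path
 * (try_fast_path() is a hypothetical helper):
 *
 *	size_t before = iov_iter_count(i);
 *	ret = try_fast_path(i);
 *	if (ret < 0)
 *		iov_iter_revert(i, before - iov_iter_count(i));
 */
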
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

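/*
 * Sketch: building a kernel-space iterator over a single buffer, the way
 * kernel_read()-style helpers do it; note that at this point the
 * constructors require the ITER_* flag to be OR'ed into the direction:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ | ITER_KVEC, &kv, 1, len);
 */
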
void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

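/*
 * Sketch: a direct-I/O front end rejecting a request whose memory or
 * length does not meet the device's alignment requirement:
 *
 *	if (iov_iter_alignment(iter) & (blocksize - 1))
 *		return -EINVAL;
 */
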
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

/* Returns the number of bytes grabbed, or -EFAULT if nothing could be pushed */
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

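/*
 * Sketch: a direct-I/O style consumer pinning pages one batch at a time.
 * The iterator is not advanced by this call, and every page returned
 * must eventually be released with put_page():
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t got = iov_iter_get_pages(i, pages, bytes, 16, &off);
 *
 *	if (got > 0) {
 *		// data begins at offset off into pages[0], spans got bytes
 *		iov_iter_advance(i, got);
 *	}
 */
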
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	ssize_t n;	/* may hold -EFAULT from __pipe_get_pages() */
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

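/*
 * Sketch of the networking-style caller: copy a datagram out to user
 * space while folding the data into the packet checksum (compare
 * skb_copy_and_csum_datagram() in net/core/datagram.c):
 *
 *	__wsum csum = 0;
 *
 *	if (csum_and_copy_to_iter(data, len, &csum, to) != len)
 *		return -EFAULT;
 *	// csum now covers the copied bytes
 */
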
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

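/*
 * Sketch of the syscall-side pattern (compare do_readv() in
 * fs/read_write.c): a small on-stack array handles the common case, and
 * kfree() of the returned pointer is always safe because *iov is NULL
 * when the stack array sufficed:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... use the iterator ...
 *	kfree(iov);
 */
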
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
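
/*
 * Sketch: the single-buffer flavour used by plain read()/write() style
 * entry points; @iov must stay in scope for the iterator's lifetime:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *
 *	ret = import_single_range(WRITE, buf, len, &iov, &iter);
 *	if (unlikely(ret))
 *		return ret;
 */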
1449