/* xref: /openbmc/linux/lib/iov_iter.c (revision e6dec923) */
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}
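
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a caller plugs per-segment expressions into iterate_all_kinds().
 * The macro declares a local "v" of the matching segment type and runs one
 * of the three expressions per segment; the iovec expression must yield
 * the number of bytes NOT processed (0 on full success), while the bvec
 * and kvec expressions are evaluated for side effects only.  The helper
 * name example_count_bytes() is hypothetical.
 *
 *	static size_t example_count_bytes(struct iov_iter *i, size_t bytes)
 *	{
 *		size_t seen = 0;
 *		iterate_all_kinds(i, bytes, v,
 *			({ seen += v.iov_len; 0; }),	// userspace iovec
 *			seen += v.bv_len,		// page-based bvec
 *			seen += v.iov_len		// kernel kvec
 *		)
 *		return seen;
 *	}
 */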

static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(VERIFY_READ, from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}
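
/*
 * Editorial note: like the raw_copy_{to,from}_user() primitives they wrap,
 * copyout() and copyin() return the number of bytes that could NOT be
 * transferred, so 0 means complete success.  A hypothetical caller:
 *
 *	size_t left = copyout(ubuf, kbuf, len);
 *	size_t copied = len - left;	// bytes actually transferred
 */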

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}
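
/*
 * Editorial sketch: pipe->buffers is always a power of two, so the
 * "& (pipe->buffers - 1)" above is a cheap "% pipe->buffers" that wraps
 * the slot index around the ring.  With pipe->buffers == 16:
 *
 *	next_idx(14, pipe) == 15
 *	next_idx(15, pipe) == 0		// (15 + 1) & 15
 */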

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
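
/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * write paths typically fault the user pages in before taking page locks,
 * then perform the real copy with page faults disabled and retry if the
 * copy comes up short:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	// ... lock the page ...
 *	pagefault_disable();
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	pagefault_enable();
 */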

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
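
/*
 * Minimal usage sketch (assumed caller, not part of the original file):
 * wrap a single user iovec and drain a kernel buffer into it.  READ here
 * means data flows toward userspace, i.e. the iterator is a destination.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);	// may be short
 */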

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
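
/*
 * Editorial note on the step expressions above: the idiom
 * "(from += v.iov_len) - v.iov_len" advances the source cursor and yields
 * its pre-increment value in a single expression, which matters because
 * the iterate macros evaluate each step once per segment.  Expanded
 * equivalent for one segment:
 *
 *	const char *src = from;
 *	from += v.iov_len;
 *	copyout(v.iov_base, src, v.iov_len);
 */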

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);
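
/*
 * Contract sketch (assumed caller, not part of the original file): the
 * "full" variant reports success or failure as a whole; on failure it
 * returns false and leaves the iterator unadvanced (the destination
 * buffer contents are then unreliable), so callers can branch cleanly:
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), i))
 *		return -EFAULT;	// iterator position unchanged
 */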

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	size_t v = n + offset;
	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (i->type & ITER_BVEC) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
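
/*
 * Pairing sketch (hypothetical caller): iov_iter_revert() lets a caller
 * that advanced the iterator speculatively wind it back by the amount
 * that turned out not to be consumed, e.g. after a failed operation:
 *
 *	size_t before = iov_iter_count(i);
 *	status = do_something(i);	// hypothetical; advances i
 *	if (status < 0)
 *		iov_iter_revert(i, before - iov_iter_count(i));
 */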

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
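
/*
 * Usage sketch (assumed caller, not part of the original file): pin up to
 * one page's worth of the iterator for zero-copy I/O.  The pages come
 * back with an elevated refcount and must be released with put_page();
 * note that the iterator itself is not advanced.
 *
 *	struct page *pages[1];
 *	size_t off;
 *	ssize_t n = iov_iter_get_pages(i, pages, PAGE_SIZE, 1, &off);
 *
 *	if (n > 0) {
 *		// n bytes of data start at offset "off" in pages[0]
 *		put_page(pages[0]);
 *	}
 */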

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	ssize_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
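
/*
 * Call-site sketch (hypothetical syscall path): the on-stack fast array
 * and kfree() contract documented above, in code form.  *iov ends up
 * either NULL (fast path) or a heap copy, so kfree() is always safe:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);		// hypothetical consumer
 *	kfree(iov);
 */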

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
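
/*
 * Usage sketch (assumed caller): the single-range variant backs plain
 * read(2)/write(2) style paths where there is exactly one user buffer:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *
 *	if (import_single_range(WRITE, ubuf, len, &iov, &iter))
 *		return -EFAULT;
 *	// iter now covers min(len, MAX_RW_COUNT) bytes of ubuf
 */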