/* lib/iov_iter.c (openbmc/linux, revision f7d84fa7) */
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

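/*
 * The iterate_* helpers below walk one kind of segment each, evaluating
 * STEP for every non-empty chunk (described by __v).  For user-space
 * iovecs STEP yields the number of bytes it failed to process, so a
 * fault ends the walk early; kvecs and bvecs cannot fault, so their
 * STEP result is discarded.
 */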
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

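/*
 * Dispatch on the iterator type.  iterate_all_kinds() just walks the
 * segments; iterate_and_advance() also consumes what it walked,
 * updating count, nr_segs and iov_offset as it goes.
 */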
#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

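/*
 * Copy from a (possibly highmem) page into user-space iovec segments;
 * the from_iter twin below mirrors it.  With CONFIG_HIGHMEM we try
 * kmap_atomic() first, after making sure the user pages are faulted in;
 * if the atomic copy still comes up short (or HIGHMEM is off) we use
 * plain kmap(), which may sleep.
 */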
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

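/*
 * With PIPE_PARANOIA enabled, sanity() cross-checks a pipe-backed
 * iterator against the pipe itself: a non-zero iov_offset must point at
 * the end of the last occupied buffer, and otherwise idx must sit right
 * after that buffer.  On a mismatch it dumps the pipe state and WARNs.
 */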
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (e.g.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
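/*
 * Typical caller pattern (a sketch, not taken from this file): fault
 * the pages in up front, before entering a region that must not take
 * page faults:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 */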

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
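/*
 * Example (a sketch; "ubuf" and "len" are illustrative): a one-segment
 * read iterator over a user buffer:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */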

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

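/*
 * Find where new data would land in the pipe: normally right where the
 * iterator points, but a full page, or a last buffer that we did not
 * allocate ourselves and so must not append to, pushes us to the start
 * of the next slot.
 */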
static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

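/*
 * Reserve up to @size bytes of buffer space in the pipe, allocating
 * fresh pages as needed, and tell the caller where to start writing.
 * Returns the number of bytes actually reserved; this can come up short
 * if the pipe fills up or a page allocation fails.
 */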
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

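/*
 * Copy @bytes from the kernel buffer at @addr into the iterator and
 * advance it.  Returns the number of bytes copied, which may be short
 * if the iterator runs out or a user-space access faults.
 */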
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

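/*
 * The _full variants are all-or-nothing: either the whole @bytes is
 * copied and the iterator advanced, or nothing is consumed and false is
 * returned.
 */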
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full_nocache);

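/*
 * Page-based copies.  Kernel-addressable iterators (bvec/kvec) can go
 * through a temporary kmap_atomic(); user-space iovecs take the
 * faulting paths above; for a pipe we attach a reference to the page
 * instead of copying.
 */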
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

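/*
 * Release every pipe buffer past the iterator's current position and
 * trim the length of the one it points into.
 */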
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

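/*
 * Undo the last @unroll bytes of advancement, walking the segments (or
 * pipe buffers) backwards.  Reverting more than a caller could ever
 * have advanced (MAX_RW_COUNT) is treated as a bug.
 */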
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (i->type & ITER_BVEC) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

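/*
 * Constructors for the non-iovec flavours; @direction must carry the
 * matching ITER_* flag.  Plain iovecs go through iov_iter_init() above.
 */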
void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	ssize_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

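/*
 * Checksumming copies for the networking code: like
 * copy_{to,from}_iter() but folding the copied data into a running
 * checksum as we go.
 */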
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
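/*
 * Typical caller pattern (a sketch; names are illustrative):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	kfree(iov);	(safe whether or not a heap copy was made)
 */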

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);