/* lib/iov_iter.c (xref: /openbmc/linux, revision e1267585) */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

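/*
 * Per-type iteration helpers.  Each macro walks the segment array of an
 * iov_iter, handing STEP one chunk at a time through __v, starting at
 * offset 'skip' into the current segment and stopping after n bytes.
 *
 * For iterate_iovec() the STEP expression must evaluate to the number of
 * bytes it failed to process (the usual copy_to_user()-style return), so
 * the walk stops early on a fault and n ends up holding the number of
 * bytes actually handled.  The kvec and bvec variants deal with kernel
 * memory, where STEP cannot fail; its value is discarded and n is left
 * equal to the amount requested.
 */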
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

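/*
 * Dispatch on the iterator type and run the matching per-segment
 * expression: I for user-space iovecs, B for bio_vecs, K for kvecs.
 * iterate_all_kinds() leaves the iterator untouched; iterate_and_advance()
 * additionally consumes n bytes, updating the segment pointer, nr_segs,
 * iov_offset and count as it goes.
 */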
#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		const struct bio_vec *bvec;			\
		struct bio_vec v;				\
		iterate_bvec(i, n, v, bvec, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		const struct bio_vec *bvec;			\
		struct bio_vec v;				\
		iterate_bvec(i, n, v, bvec, skip, (B))		\
		if (skip == bvec->bv_len) {			\
			bvec++;					\
			skip = 0;				\
		}						\
		i->nr_segs -= bvec - i->bvec;			\
		i->bvec = bvec;					\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
		if (skip == kvec->iov_len) {			\
			kvec++;					\
			skip = 0;				\
		}						\
		i->nr_segs -= kvec - i->kvec;			\
		i->kvec = kvec;					\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
		if (skip == iov->iov_len) {			\
			iov++;					\
			skip = 0;				\
		}						\
		i->nr_segs -= iov - i->iov;			\
		i->iov = iov;					\
	}							\
	i->count -= n;						\
	i->iov_offset = skip;					\
}

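/*
 * Slow path for user-backed iovecs: fault the first chunk in, copy with
 * the page mapped via kmap_atomic(), and fall back to a sleeping kmap()
 * copy if the atomic attempt faults.  Returns the number of bytes copied
 * and advances the iterator.  copy_page_from_iter_iovec() below is the
 * mirror image for the copy-from-user direction.
 */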
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

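/*
 * Initialize an iterator over an array of user-space iovecs.  @direction
 * is READ or WRITE.  If the caller is running under set_fs(KERNEL_DS),
 * the "user" pointers are really kernel addresses and the iterator is
 * tagged ITER_KVEC instead, so the copy routines skip the user-access
 * machinery.
 *
 * Illustrative sketch only (uiov, kbuf, kbuf_len and total_len are the
 * caller's own variables, not part of this file): a read-style path that
 * fills a user iovec array from a kernel buffer might do
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, uiov, nr_segs, total_len);
 *	copied = copy_to_iter(kbuf, kbuf_len, &iter);
 */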
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

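/*
 * Copy @bytes from the kernel buffer @addr into the iterator, advancing
 * it.  Returns the number of bytes copied, which may be short of the
 * request if a user-space segment faults.
 */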
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

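/*
 * Copy @bytes from the iterator into the kernel buffer @addr, advancing
 * the iterator.  Returns the number of bytes copied, which may be short
 * if a user-space segment faults.
 */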
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

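/*
 * Like copy_from_iter(), but user-space segments are copied with
 * __copy_from_user_nocache(), which uses non-temporal stores where the
 * architecture supports them so the destination buffer does not pollute
 * the CPU caches.  bvec and kvec segments still use an ordinary memcpy().
 */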
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

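/*
 * Copy @bytes from @page, starting at @offset, into the iterator.  For
 * kvec- and bvec-backed iterators the page is simply kmapped and handed
 * to copy_to_iter(); user-backed iterators take the fault-handling slow
 * path above.  copy_page_from_iter() below is the opposite direction.
 */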
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

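/*
 * Write @bytes of zeroes into the iterator, advancing it.  Returns the
 * number of bytes actually cleared, which may be short if a user-space
 * segment faults.
 */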
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

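/*
 * Copy @bytes from the iterator into @page at @offset without sleeping
 * (kmap_atomic() plus the inatomic user-copy primitives).  The iterator
 * itself is not advanced; callers such as the generic write path fault
 * the user pages in beforehand and advance separately.  Returns the
 * number of bytes actually copied.
 */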
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

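/*
 * Advance the iterator by @size bytes: count is reduced and the segment
 * pointer and iov_offset move forward across as many segments as needed.
 */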
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

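/*
 * Return the OR of the base addresses (page offsets for bvecs) and
 * lengths of all segments covered by the first count bytes.  Callers
 * typically test the result against a block-size mask to decide whether
 * the iterator is sufficiently aligned, e.g. for direct I/O.
 */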
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

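/*
 * Pin up to @maxpages pages backing the first segment of the iterator:
 * get_user_pages_fast() for user memory, a page reference for a bvec.
 * *@start receives the offset into the first page.  Returns the number
 * of bytes covered by the pinned pages (possibly fewer than @maxsize),
 * 0 for an empty iterator, or a negative errno; ITER_KVEC iterators are
 * not supported and yield -EFAULT.
 */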
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

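/*
 * Like copy_from_iter(), but also folds an Internet checksum of the
 * copied data into *@csum, using csum_block_add() so the running value
 * stays correct across segment boundaries.
 */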
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

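/*
 * Mirror of csum_and_copy_from_iter() for the copy-out direction: copies
 * @bytes from @addr into the iterator while accumulating the checksum of
 * the data into *@csum.
 */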
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

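/*
 * Return the number of pages spanned by the first count bytes of the
 * iterator, capped at @maxpages.
 */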
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

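/*
 * Duplicate @old into @new, kmemdup()ing the segment array (bio_vecs, or
 * iovecs/kvecs, which share their layout) so the copy remains valid
 * independently of the original.  Returns the new array, or NULL if the
 * allocation failed.
 */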
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

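/*
 * Copy in a user iovec array and set up an iov_iter over it.  Up to
 * @fast_segs entries fit in the caller-supplied *@iov array; anything
 * larger is allocated.  On return *@iov points to the array the caller
 * must eventually kfree() (NULL when the fast array was used, so an
 * unconditional kfree() is safe).  Returns 0 on success or a negative
 * error.
 *
 * Illustrative sketch of a typical caller (uvec, nr_segs and the I/O
 * routine are the caller's own, not part of this file):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_read_iter(file, &iter);
 *	kfree(iov);
 */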
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

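/*
 * Wrap a single user buffer in a one-element iovec and an iov_iter.  The
 * length is clamped to MAX_RW_COUNT and the buffer is checked with
 * access_ok(); returns 0 on success or -EFAULT.
 */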
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);