// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
	size_t __maybe_unused off = 0;				\
	len = n;						\
	base = __p + i->iov_offset;				\
	len -= (STEP);						\
	i->iov_offset += len;					\
	n = len;						\
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_ubuf(i))) {			\
			void __user *base;			\
			size_t len;				\
			iterate_buf(i, n, base, len, off,	\
						i->ubuf, (I))	\
		} else if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov = iter_iov(i);	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - iter_iov(i);	\
			i->__iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyout_nofault(void __user *to, const void *from, size_t n)
{
	long res;

	if (should_fail_usercopy())
		return n;

	res = copy_to_user_nofault(to, from, n);

	return res < 0 ? n : res;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	size_t res = n;

	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	return res;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
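
/*
 * Example usage (illustrative sketch; the helper name and the fixed-size
 * bounce buffer are hypothetical): the classic write-path pattern that
 * pairs a possibly-short copy with fault_in_iov_iter_readable(), much
 * like generic_perform_write() does with page cache pages.
 */
static __maybe_unused ssize_t example_copy_in(struct iov_iter *from)
{
	char buf[256];
	size_t want = min_t(size_t, sizeof(buf), iov_iter_count(from));
	size_t copied;

	do {
		/* May be short if a source page isn't resident. */
		copied = copy_from_iter(buf, want, from);
		if (copied == want)
			break;
		/* Rewind over the short copy, then fault the rest in. */
		iov_iter_revert(from, copied);
		if (fault_in_iov_iter_readable(from, want) == want)
			return -EFAULT;	/* nothing could be faulted in */
	} while (1);

	return copied;
}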

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.copy_mc = false,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.__iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);
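
/*
 * Example usage (illustrative sketch; the helper and its parameters are
 * hypothetical): wrapping a caller-supplied iovec array so kernel data
 * can be copied out to it.
 */
static __maybe_unused size_t example_send_reply(const struct iovec *vec,
						unsigned long nr_segs,
						const void *reply, size_t len)
{
	struct iov_iter iter;

	/* ITER_DEST (== READ): data flows from the kernel into the iovecs. */
	iov_iter_init(&iter, ITER_DEST, vec, nr_segs, len);
	return copy_to_iter(reply, len, &iter);
}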

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the number
 * of bytes successfully copied.
 *
 * The main differences between this and a typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal, so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC and ITER_BVEC can return short copies.  Compare to
 *   copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
				 size_t size)
{
	if (iov_iter_is_copy_mc(i))
		return (void *)copy_mc_to_kernel(to, from, size);
	return memcpy(to, from, size);
}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy_from_iter(i, addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (WARN_ON(n > v || v > page_size(head)))
		return false;
	return true;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_to_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
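
/*
 * Example usage (illustrative sketch; the helper is hypothetical): a
 * read-style loop that hands an array of pages to an iterator and stops
 * on the first short copy.
 */
static __maybe_unused ssize_t example_read_pages(struct page **pages,
						 unsigned int nr, size_t count,
						 struct iov_iter *to)
{
	ssize_t done = 0;
	unsigned int k;

	for (k = 0; k < nr && count; k++) {
		size_t n = min_t(size_t, count, PAGE_SIZE);
		size_t copied = copy_page_to_iter(pages[k], 0, n, to);

		done += copied;
		count -= copied;
		if (copied < n)	/* iterator exhausted, or a fault */
			return done ? done : -EFAULT;
	}
	return done;
}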

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
				 struct iov_iter *i)
{
	size_t res = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

		iterate_and_advance(i, n, base, len, off,
			copyout_nofault(base, kaddr + offset + off, len),
			memcpy(base, kaddr + offset + off, len)
		)
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter_nofault);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_from_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (!page_copy_sane(page, offset, bytes)) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (WARN_ON_ONCE(!i->data_source)) {
		kunmap_atomic(kaddr);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy_from_iter(i, p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - iter_iov(i);
	i->__iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
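
/*
 * Example usage (illustrative sketch; the helper is hypothetical):
 * advance and revert are symmetric, so a caller can consume part of an
 * iterator speculatively and wind it back on failure.
 */
static __maybe_unused void example_skip_or_rewind(struct iov_iter *i,
						  size_t hdr, bool ok)
{
	iov_iter_advance(i, hdr);		/* consume a header's worth */
	if (!ok)
		iov_iter_revert(i, hdr);	/* put it back on failure */
}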

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = iter_iov(i);
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->__iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.copy_mc = false,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);
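
/*
 * Example usage (illustrative sketch; the helper is hypothetical):
 * wrapping a kernel buffer in a single-segment ITER_SOURCE kvec
 * iterator, e.g. to feed kernel_sendmsg().
 */
static __maybe_unused void example_wrap_kbuf(struct iov_iter *iter,
					     struct kvec *vec,
					     void *buf, size_t len)
{
	vec->iov_base = buf;
	vec->iov_len = len;
	/* ITER_SOURCE (== WRITE): data flows out of the kernel buffer. */
	iov_iter_kvec(iter, ITER_SOURCE, vec, 1, len);
}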

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.copy_mc = false,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The caller *must* prevent the
 * pages from evaporating, either by taking a ref on them or by locking them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.copy_mc = false,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.copy_mc = false,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(iov->iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 * 	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any address or length intersects with the provided masks,
 * true otherwise
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
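
/*
 * Example usage (illustrative sketch; the helper and the 512-byte
 * requirement are hypothetical): a direct-I/O style check that every
 * segment is sector-aligned in both address and length.  Block drivers
 * would derive the masks from the queue's logical block size.
 */
static __maybe_unused bool example_dio_aligned(const struct iov_iter *i)
{
	/* mask = alignment - 1, so 511 demands 512-byte alignment */
	return iov_iter_is_aligned(i, 511, 511);
}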

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;
		if (len) {
			res |= (unsigned long)iov->iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		const struct iovec *iov = iter_iov(i) + k;
		if (iov->iov_len) {
			unsigned long base = (unsigned long)iov->iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + iov->iov_len;
			if (size <= iov->iov_len)
				break;
			size -= iov->iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

	if (count > maxpages)
		count = maxpages;
	WARN_ON(!count);	// caller should've prevented that
	if (!*res) {
		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
		if (!*res)
			return 0;
	}
	return count;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset, count;
	pgoff_t index;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = want_pages_array(pages, maxsize, offset, maxpages);
	if (!count)
		return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
	return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)iov->iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start,
		   iov_iter_extraction_t extraction_flags)
{
	unsigned int n, gup_flags = 0;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;
	if (extraction_flags & ITER_ALLOW_P2PDMA)
		gup_flags |= FOLL_PCI_P2PDMA;

	if (likely(user_backed_iter(i))) {
		unsigned long addr;
		int res;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
			return res;
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
		return maxsize;
	}
	if (iov_iter_is_bvec(i)) {
		struct page **p;
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		p = *pages;
		for (int k = 0; k < n; k++)
			get_page(p[k] = page + k);
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
			i->iov_offset = 0;
			i->bvec++;
			i->nr_segs--;
		}
		return maxsize;
	}
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start, iov_iter_extraction_t extraction_flags)
{
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
					  start, extraction_flags);
}
EXPORT_SYMBOL_GPL(iov_iter_get_pages);

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
		size_t maxsize, unsigned maxpages, size_t *start)
{
	return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
}
EXPORT_SYMBOL(iov_iter_get_pages2);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start, iov_iter_extraction_t extraction_flags)
{
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
					 extraction_flags);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
	}
	return len;
}
EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
		struct page ***pages, size_t maxsize, size_t *start)
{
	return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
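
/*
 * Example usage (illustrative sketch; the helper is hypothetical, and
 * the I/O step is elided): grabbing references to the pages behind an
 * iterator and dropping them again afterwards.
 */
static __maybe_unused ssize_t example_with_pages(struct iov_iter *i,
						 size_t bytes)
{
	struct page **pages = NULL;
	size_t offset;
	ssize_t got;
	int k;

	got = iov_iter_get_pages_alloc2(i, &pages, bytes, &offset);
	if (got <= 0)
		return got;

	/* ... use "got" bytes starting at offset "offset" in pages[0] ... */

	for (k = 0; k < DIV_ROUND_UP(offset + got, PAGE_SIZE); k++)
		put_page(pages[k]);
	kvfree(pages);	/* the array itself came from kvmalloc_array() */
	return got;
}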

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (unlikely(iov_iter_is_discard(i))) {
		// can't use csum_and_memcpy() for that one - data is not copied
		csstate->csum = csum_block_add(csstate->csum,
					       csum_partial(addr, bytes, 0),
					       csstate->off);
		csstate->off += bytes;
		return bytes;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = iter_iov(i); size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
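
/*
 * Example usage (illustrative sketch; the cap is hypothetical): sizing
 * a page array before mapping an iterator.  Because the result is
 * clamped to the cap, it can be used directly as an allocation count.
 */
#define EXAMPLE_MAX_PAGES 256

static __maybe_unused int example_count_pages(const struct iov_iter *i)
{
	return iov_iter_npages(i, EXAMPLE_MAX_PAGES);
}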

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->__iov = kmemdup(new->__iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uiov, unsigned long nr_segs)
{
	int ret = -EFAULT;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	do {
		void __user *buf;
		ssize_t len;

		unsafe_get_user(len, &uiov->iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov->iov_base, uaccess_end);

		/* check for size_t not fitting in ssize_t .. */
		if (unlikely(len < 0)) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov->iov_base = buf;
		iov->iov_len = len;

		uiov++; iov++;
	} while (--nr_segs);

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (unlikely(compat))
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

/*
 * Single segment iovec supplied by the user, import it as ITER_UBUF.
 */
static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
				   struct iovec **iovp, struct iov_iter *i,
				   bool compat)
{
	struct iovec *iov = *iovp;
	ssize_t ret;

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, 1);
	else
		ret = copy_iovec_from_user(iov, uvec, 1);
	if (unlikely(ret))
		return ret;

	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
	if (unlikely(ret))
		return ret;
	*iovp = NULL;
	return i->count;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	if (nr_segs == 1)
		return __import_iovec_ubuf(type, uvec, iovp, i, compat);

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
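
/*
 * Example usage (illustrative sketch; the syscall body is hypothetical
 * and the actual file read is elided): the readv()-style pattern the
 * kernel-doc above describes.
 */
static __maybe_unused ssize_t example_readv(const struct iovec __user *uvec,
					    unsigned int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... read up to iov_iter_count(&iter) bytes into the iterator ... */

	/* Safe even if the on-stack array was used: *iovp was set to NULL. */
	kfree(iov);
	return ret;
}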

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduce
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->__iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
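
/*
 * Example usage (illustrative sketch; the consumer callback is
 * hypothetical): snapshot an iterator before a possibly-failing
 * operation and rewind it on error.
 */
static __maybe_unused void example_snapshot(struct iov_iter *i,
					    bool (*consume)(struct iov_iter *))
{
	struct iov_iter_state state;

	iov_iter_save_state(i, &state);
	if (!consume(i))			/* consumer failed part-way */
		iov_iter_restore(i, &state);	/* back to the snapshot */
}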

/*
 * Extract a list of contiguous pages from an ITER_XARRAY iterator.  This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
					     struct page ***pages, size_t maxsize,
					     unsigned int maxpages,
					     iov_iter_extraction_t extraction_flags,
					     size_t *offset0)
{
	struct page *page, **p;
	unsigned int nr = 0, offset;
	loff_t pos = i->xarray_start + i->iov_offset;
	pgoff_t index = pos >> PAGE_SHIFT;
	XA_STATE(xas, i->xarray, index);

	offset = pos & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		p[nr++] = find_subpage(page, xas.xa_index);
		if (nr == maxpages)
			break;
	}
	rcu_read_unlock();

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/*
 * Extract a list of contiguous pages from an ITER_BVEC iterator.  This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	size_t skip = i->iov_offset, offset;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		maxsize = min(maxsize, i->bvec->bv_len - skip);
		if (maxsize)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->bvec++;
		skip = 0;
	}

	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	offset = skip % PAGE_SIZE;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;
	for (k = 0; k < maxpages; k++)
		p[k] = page + k;

	maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/*
 * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
 * This does not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	const void *kaddr;
	size_t skip = i->iov_offset, offset, len;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		maxsize = min(maxsize, i->kvec->iov_len - skip);
		if (maxsize)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->kvec++;
		skip = 0;
	}

	kaddr = i->kvec->iov_base + skip;
	offset = (unsigned long)kaddr & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	kaddr -= offset;
	len = offset + maxsize;
	for (k = 0; k < maxpages; k++) {
		size_t seg = min_t(size_t, len, PAGE_SIZE);

		if (is_vmalloc_or_module_addr(kaddr))
			page = vmalloc_to_page(kaddr);
		else
			page = virt_to_page(kaddr);

		p[k] = page;
		len -= seg;
		kaddr += PAGE_SIZE;
	}

	maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}
/*
 * Extract a list of contiguous pages from a user iterator and get a pin on
 * each of them.  This should only be used if the iterator is user-backed
 * (ITER_IOVEC/ITER_UBUF).
 *
 * It does not get refs on the pages, but the pages must be unpinned by the
 * caller once the transfer is complete.
 *
 * This is safe to be used where background IO/DMA *is* going to be modifying
 * the buffer; using a pin rather than a ref forces fork() to give the child
 * a copy of the page.
 */
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
					   struct page ***pages,
					   size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	unsigned long addr;
	unsigned int gup_flags = 0;
	size_t offset;
	int res;

	if (i->data_source == ITER_DEST)
		gup_flags |= FOLL_WRITE;
	if (extraction_flags & ITER_ALLOW_P2PDMA)
		gup_flags |= FOLL_PCI_P2PDMA;
	if (i->nofault)
		gup_flags |= FOLL_NOFAULT;

	addr = first_iovec_segment(i, &maxsize);
	*offset0 = offset = addr % PAGE_SIZE;
	addr &= PAGE_MASK;
	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
	if (unlikely(res <= 0))
		return res;
	maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/**
 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
 * @i: The iterator to extract from
 * @pages: Where to return the list of pages
 * @maxsize: The maximum amount of iterator to extract
 * @maxpages: The maximum size of the list of pages
 * @extraction_flags: Flags to qualify request
 * @offset0: Where to return the starting offset into (*@pages)[0]
 *
 * Extract a list of contiguous pages from the current point of the iterator,
 * advancing the iterator.  The maximum number of pages and the maximum amount
 * of page contents can be set.
 *
 * If *@pages is NULL, a page list will be allocated to the required size and
 * *@pages will be set to its base.  If *@pages is not NULL, it will be assumed
 * that the caller allocated a page list at least @maxpages in size and this
 * will be filled in.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * The iov_iter_extract_will_pin() function can be used to query how cleanup
 * should be performed.
 *
 * Extra refs or pins on the pages may be obtained as follows:
 *
 *  (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
 *      added to the pages, but refs will not be taken.
 *      iov_iter_extract_will_pin() will return true.
 *
 *  (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
 *      merely listed; no extra refs or pins are obtained.
 *      iov_iter_extract_will_pin() will return false.
 *
 * Note also:
 *
 *  (*) Use with ITER_DISCARD is not supported as that has no content.
 *
 * On success, the function sets *@pages to the new pagelist, if allocated, and
 * sets *offset0 to the offset into the first page.
 *
 * On failure, it may return -ENOMEM or -EFAULT.
 */
ssize_t iov_iter_extract_pages(struct iov_iter *i,
			       struct page ***pages,
			       size_t maxsize,
			       unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0)
{
	maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
	if (!maxsize)
		return 0;

	if (likely(user_backed_iter(i)))
		return iov_iter_extract_user_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_kvec(i))
		return iov_iter_extract_kvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_bvec(i))
		return iov_iter_extract_bvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_xarray(i))
		return iov_iter_extract_xarray_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
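
/*
 * Example usage (illustrative sketch; the helper is hypothetical and
 * the I/O step is elided): extract pages for a transfer and release
 * them with the cleanup method that iov_iter_extract_will_pin() reports.
 */
static __maybe_unused ssize_t example_extract(struct iov_iter *i, size_t bytes)
{
	struct page **pages = NULL;
	size_t offset;
	ssize_t got;

	got = iov_iter_extract_pages(i, &pages, bytes, ~0U, 0, &offset);
	if (got <= 0)
		return got;

	/* ... perform the transfer ... */

	if (iov_iter_extract_will_pin(i))
		unpin_user_pages(pages, DIV_ROUND_UP(offset + got, PAGE_SIZE));
	kvfree(pages);
	return got;
}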
1870