xref: /openbmc/linux/lib/iov_iter.c (revision bb26cfd9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
6 #include <linux/uio.h>
7 #include <linux/pagemap.h>
8 #include <linux/highmem.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 #include <linux/splice.h>
12 #include <linux/compat.h>
13 #include <net/checksum.h>
14 #include <linux/scatterlist.h>
15 #include <linux/instrumented.h>
16 
17 #define PIPE_PARANOIA /* for now */
18 
19 /* covers iovec and kvec alike */
20 #define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
21 	size_t off = 0;						\
22 	size_t skip = i->iov_offset;				\
23 	do {							\
24 		len = min(n, __p->iov_len - skip);		\
25 		if (likely(len)) {				\
26 			base = __p->iov_base + skip;		\
27 			len -= (STEP);				\
28 			off += len;				\
29 			skip += len;				\
30 			n -= len;				\
31 			if (skip < __p->iov_len)		\
32 				break;				\
33 		}						\
34 		__p++;						\
35 		skip = 0;					\
36 	} while (n);						\
37 	i->iov_offset = skip;					\
38 	n = off;						\
39 }
40 
41 #define iterate_bvec(i, n, base, len, off, p, STEP) {		\
42 	size_t off = 0;						\
43 	unsigned skip = i->iov_offset;				\
44 	while (n) {						\
45 		unsigned offset = p->bv_offset + skip;		\
46 		unsigned left;					\
47 		void *kaddr = kmap_local_page(p->bv_page +	\
48 					offset / PAGE_SIZE);	\
49 		base = kaddr + offset % PAGE_SIZE;		\
50 		len = min(min(n, (size_t)(p->bv_len - skip)),	\
51 		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
52 		left = (STEP);					\
53 		kunmap_local(kaddr);				\
54 		len -= left;					\
55 		off += len;					\
56 		skip += len;					\
57 		if (skip == p->bv_len) {			\
58 			skip = 0;				\
59 			p++;					\
60 		}						\
61 		n -= len;					\
62 		if (left)					\
63 			break;					\
64 	}							\
65 	i->iov_offset = skip;					\
66 	n = off;						\
67 }
68 
69 #define iterate_xarray(i, n, base, len, __off, STEP) {		\
70 	__label__ __out;					\
71 	size_t __off = 0;					\
72 	struct folio *folio;					\
73 	loff_t start = i->xarray_start + i->iov_offset;		\
74 	pgoff_t index = start / PAGE_SIZE;			\
75 	XA_STATE(xas, i->xarray, index);			\
76 								\
77 	len = PAGE_SIZE - offset_in_page(start);		\
78 	rcu_read_lock();					\
79 	xas_for_each(&xas, folio, ULONG_MAX) {			\
80 		unsigned left;					\
81 		size_t offset;					\
82 		if (xas_retry(&xas, folio))			\
83 			continue;				\
84 		if (WARN_ON(xa_is_value(folio)))		\
85 			break;					\
86 		if (WARN_ON(folio_test_hugetlb(folio)))		\
87 			break;					\
88 		offset = offset_in_folio(folio, start + __off);	\
89 		while (offset < folio_size(folio)) {		\
90 			base = kmap_local_folio(folio, offset);	\
91 			len = min(n, len);			\
92 			left = (STEP);				\
93 			kunmap_local(base);			\
94 			len -= left;				\
95 			__off += len;				\
96 			n -= len;				\
97 			if (left || n == 0)			\
98 				goto __out;			\
99 			offset += len;				\
100 			len = PAGE_SIZE;			\
101 		}						\
102 	}							\
103 __out:								\
104 	rcu_read_unlock();					\
105 	i->iov_offset += __off;					\
106 	n = __off;						\
107 }
108 
109 #define __iterate_and_advance(i, n, base, len, off, I, K) {	\
110 	if (unlikely(i->count < n))				\
111 		n = i->count;					\
112 	if (likely(n)) {					\
113 		if (likely(iter_is_iovec(i))) {			\
114 			const struct iovec *iov = i->iov;	\
115 			void __user *base;			\
116 			size_t len;				\
117 			iterate_iovec(i, n, base, len, off,	\
118 						iov, (I))	\
119 			i->nr_segs -= iov - i->iov;		\
120 			i->iov = iov;				\
121 		} else if (iov_iter_is_bvec(i)) {		\
122 			const struct bio_vec *bvec = i->bvec;	\
123 			void *base;				\
124 			size_t len;				\
125 			iterate_bvec(i, n, base, len, off,	\
126 						bvec, (K))	\
127 			i->nr_segs -= bvec - i->bvec;		\
128 			i->bvec = bvec;				\
129 		} else if (iov_iter_is_kvec(i)) {		\
130 			const struct kvec *kvec = i->kvec;	\
131 			void *base;				\
132 			size_t len;				\
133 			iterate_iovec(i, n, base, len, off,	\
134 						kvec, (K))	\
135 			i->nr_segs -= kvec - i->kvec;		\
136 			i->kvec = kvec;				\
137 		} else if (iov_iter_is_xarray(i)) {		\
138 			void *base;				\
139 			size_t len;				\
140 			iterate_xarray(i, n, base, len, off,	\
141 							(K))	\
142 		}						\
143 		i->count -= n;					\
144 	}							\
145 }
146 #define iterate_and_advance(i, n, base, len, off, I, K) \
147 	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
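
/*
 * Usage note (illustrative): the I and K arguments are statement expressions
 * applied to a user pointer or a kernel pointer respectively; I must evaluate
 * to the number of bytes it failed to process, as copyout()/copyin() below
 * do.  iterate_and_advance() discards K's value (the ",0" above), treating
 * kernel-side steps as infallible; __iterate_and_advance() keeps it, which
 * _copy_mc_to_iter() relies on for copy_mc_to_kernel().  A typical caller
 * looks like _copy_to_iter() further down:
 *
 *	iterate_and_advance(i, bytes, base, len, off,
 *		copyout(base, addr + off, len),
 *		memcpy(base, addr + off, len)
 *	)
 */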
148 
149 static int copyout(void __user *to, const void *from, size_t n)
150 {
151 	if (should_fail_usercopy())
152 		return n;
153 	if (access_ok(to, n)) {
154 		instrument_copy_to_user(to, from, n);
155 		n = raw_copy_to_user(to, from, n);
156 	}
157 	return n;
158 }
159 
160 static int copyin(void *to, const void __user *from, size_t n)
161 {
162 	if (should_fail_usercopy())
163 		return n;
164 	if (access_ok(from, n)) {
165 		instrument_copy_from_user(to, from, n);
166 		n = raw_copy_from_user(to, from, n);
167 	}
168 	return n;
169 }
170 
171 #ifdef PIPE_PARANOIA
172 static bool sanity(const struct iov_iter *i)
173 {
174 	struct pipe_inode_info *pipe = i->pipe;
175 	unsigned int p_head = pipe->head;
176 	unsigned int p_tail = pipe->tail;
177 	unsigned int p_mask = pipe->ring_size - 1;
178 	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
179 	unsigned int i_head = i->head;
180 	unsigned int idx;
181 
182 	if (i->iov_offset) {
183 		struct pipe_buffer *p;
184 		if (unlikely(p_occupancy == 0))
185 			goto Bad;	// pipe must be non-empty
186 		if (unlikely(i_head != p_head - 1))
187 			goto Bad;	// must be at the last buffer...
188 
189 		p = &pipe->bufs[i_head & p_mask];
190 		if (unlikely(p->offset + p->len != i->iov_offset))
191 			goto Bad;	// ... at the end of segment
192 	} else {
193 		if (i_head != p_head)
194 			goto Bad;	// must be right after the last buffer
195 	}
196 	return true;
197 Bad:
198 	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
199 	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
200 			p_head, p_tail, pipe->ring_size);
201 	for (idx = 0; idx < pipe->ring_size; idx++)
202 		printk(KERN_ERR "[%p %p %d %d]\n",
203 			pipe->bufs[idx].ops,
204 			pipe->bufs[idx].page,
205 			pipe->bufs[idx].offset,
206 			pipe->bufs[idx].len);
207 	WARN_ON(1);
208 	return false;
209 }
210 #else
211 #define sanity(i) true
212 #endif
213 
214 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
215 			 struct iov_iter *i)
216 {
217 	struct pipe_inode_info *pipe = i->pipe;
218 	struct pipe_buffer *buf;
219 	unsigned int p_tail = pipe->tail;
220 	unsigned int p_mask = pipe->ring_size - 1;
221 	unsigned int i_head = i->head;
222 	size_t off;
223 
224 	if (unlikely(bytes > i->count))
225 		bytes = i->count;
226 
227 	if (unlikely(!bytes))
228 		return 0;
229 
230 	if (!sanity(i))
231 		return 0;
232 
233 	off = i->iov_offset;
234 	buf = &pipe->bufs[i_head & p_mask];
235 	if (off) {
236 		if (offset == off && buf->page == page) {
237 			/* merge with the last one */
238 			buf->len += bytes;
239 			i->iov_offset += bytes;
240 			goto out;
241 		}
242 		i_head++;
243 		buf = &pipe->bufs[i_head & p_mask];
244 	}
245 	if (pipe_full(i_head, p_tail, pipe->max_usage))
246 		return 0;
247 
248 	buf->ops = &page_cache_pipe_buf_ops;
249 	buf->flags = 0;
250 	get_page(page);
251 	buf->page = page;
252 	buf->offset = offset;
253 	buf->len = bytes;
254 
255 	pipe->head = i_head + 1;
256 	i->iov_offset = offset + bytes;
257 	i->head = i_head;
258 out:
259 	i->count -= bytes;
260 	return bytes;
261 }
262 
263 /*
264  * fault_in_iov_iter_readable - fault in iov iterator for reading
265  * @i: iterator
266  * @size: maximum length
267  *
268  * Fault in one or more iovecs of the given iov_iter, to a maximum length of
269  * @size.  For each iovec, fault in each page that constitutes the iovec.
270  *
271  * Returns the number of bytes not faulted in (like copy_to_user() and
272  * copy_from_user()).
273  *
274  * Always returns 0 for non-userspace iterators.
275  */
276 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
277 {
278 	if (iter_is_iovec(i)) {
279 		size_t count = min(size, iov_iter_count(i));
280 		const struct iovec *p;
281 		size_t skip;
282 
283 		size -= count;
284 		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
285 			size_t len = min(count, p->iov_len - skip);
286 			size_t ret;
287 
288 			if (unlikely(!len))
289 				continue;
290 			ret = fault_in_readable(p->iov_base + skip, len);
291 			count -= len - ret;
292 			if (ret)
293 				break;
294 		}
295 		return count + size;
296 	}
297 	return 0;
298 }
299 EXPORT_SYMBOL(fault_in_iov_iter_readable);
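
/*
 * Example usage sketch: buffered-write paths typically pre-fault the source
 * iterator before copying under a page lock and retry on a short copy.  A
 * simplified version of that loop (the surrounding names are hypothetical):
 *
 *	again:
 *		if (fault_in_iov_iter_readable(i, bytes) == bytes)
 *			return -EFAULT;
 *		...lock and prepare the destination page...
 *		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *		...unlock the page...
 *		if (unlikely(copied == 0))
 *			goto again;
 */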
300 
301 /*
302  * fault_in_iov_iter_writeable - fault in iov iterator for writing
303  * @i: iterator
304  * @size: maximum length
305  *
306  * Faults in the iterator using get_user_pages(), i.e., without triggering
307  * hardware page faults.  This is primarily useful when we already know that
308  * some or all of the pages in @i aren't in memory.
309  *
310  * Returns the number of bytes not faulted in, like copy_to_user() and
311  * copy_from_user().
312  *
313  * Always returns 0 for non-user-space iterators.
314  */
315 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
316 {
317 	if (iter_is_iovec(i)) {
318 		size_t count = min(size, iov_iter_count(i));
319 		const struct iovec *p;
320 		size_t skip;
321 
322 		size -= count;
323 		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
324 			size_t len = min(count, p->iov_len - skip);
325 			size_t ret;
326 
327 			if (unlikely(!len))
328 				continue;
329 			ret = fault_in_safe_writeable(p->iov_base + skip, len);
330 			count -= len - ret;
331 			if (ret)
332 				break;
333 		}
334 		return count + size;
335 	}
336 	return 0;
337 }
338 EXPORT_SYMBOL(fault_in_iov_iter_writeable);
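
/*
 * Example usage sketch: a direct-I/O read path that hit -EFAULT while holding
 * locks can fault the destination in afterwards and retry, giving up only if
 * nothing at all could be faulted in ("left" is the hypothetical number of
 * bytes still to be read):
 *
 *	if (ret == -EFAULT &&
 *	    fault_in_iov_iter_writeable(iter, left) != left)
 *		goto retry;
 */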
339 
340 void iov_iter_init(struct iov_iter *i, unsigned int direction,
341 			const struct iovec *iov, unsigned long nr_segs,
342 			size_t count)
343 {
344 	WARN_ON(direction & ~(READ | WRITE));
345 	*i = (struct iov_iter) {
346 		.iter_type = ITER_IOVEC,
347 		.nofault = false,
348 		.data_source = direction,
349 		.iov = iov,
350 		.nr_segs = nr_segs,
351 		.iov_offset = 0,
352 		.count = count
353 	};
354 }
355 EXPORT_SYMBOL(iov_iter_init);
356 
357 static inline bool allocated(struct pipe_buffer *buf)
358 {
359 	return buf->ops == &default_pipe_buf_ops;
360 }
361 
362 static inline void data_start(const struct iov_iter *i,
363 			      unsigned int *iter_headp, size_t *offp)
364 {
365 	unsigned int p_mask = i->pipe->ring_size - 1;
366 	unsigned int iter_head = i->head;
367 	size_t off = i->iov_offset;
368 
369 	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
370 		    off == PAGE_SIZE)) {
371 		iter_head++;
372 		off = 0;
373 	}
374 	*iter_headp = iter_head;
375 	*offp = off;
376 }
377 
378 static size_t push_pipe(struct iov_iter *i, size_t size,
379 			int *iter_headp, size_t *offp)
380 {
381 	struct pipe_inode_info *pipe = i->pipe;
382 	unsigned int p_tail = pipe->tail;
383 	unsigned int p_mask = pipe->ring_size - 1;
384 	unsigned int iter_head;
385 	size_t off;
386 	ssize_t left;
387 
388 	if (unlikely(size > i->count))
389 		size = i->count;
390 	if (unlikely(!size))
391 		return 0;
392 
393 	left = size;
394 	data_start(i, &iter_head, &off);
395 	*iter_headp = iter_head;
396 	*offp = off;
397 	if (off) {
398 		left -= PAGE_SIZE - off;
399 		if (left <= 0) {
400 			pipe->bufs[iter_head & p_mask].len += size;
401 			return size;
402 		}
403 		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
404 		iter_head++;
405 	}
406 	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
407 		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
408 		struct page *page = alloc_page(GFP_USER);
409 		if (!page)
410 			break;
411 
412 		buf->ops = &default_pipe_buf_ops;
413 		buf->flags = 0;
414 		buf->page = page;
415 		buf->offset = 0;
416 		buf->len = min_t(ssize_t, left, PAGE_SIZE);
417 		left -= buf->len;
418 		iter_head++;
419 		pipe->head = iter_head;
420 
421 		if (left == 0)
422 			return size;
423 	}
424 	return size - left;
425 }
426 
427 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
428 				struct iov_iter *i)
429 {
430 	struct pipe_inode_info *pipe = i->pipe;
431 	unsigned int p_mask = pipe->ring_size - 1;
432 	unsigned int i_head;
433 	size_t n, off;
434 
435 	if (!sanity(i))
436 		return 0;
437 
438 	bytes = n = push_pipe(i, bytes, &i_head, &off);
439 	if (unlikely(!n))
440 		return 0;
441 	do {
442 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
443 		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
444 		i->head = i_head;
445 		i->iov_offset = off + chunk;
446 		n -= chunk;
447 		addr += chunk;
448 		off = 0;
449 		i_head++;
450 	} while (n);
451 	i->count -= bytes;
452 	return bytes;
453 }
454 
455 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
456 			      __wsum sum, size_t off)
457 {
458 	__wsum next = csum_partial_copy_nocheck(from, to, len);
459 	return csum_block_add(sum, next, off);
460 }
461 
462 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
463 					 struct iov_iter *i, __wsum *sump)
464 {
465 	struct pipe_inode_info *pipe = i->pipe;
466 	unsigned int p_mask = pipe->ring_size - 1;
467 	__wsum sum = *sump;
468 	size_t off = 0;
469 	unsigned int i_head;
470 	size_t r;
471 
472 	if (!sanity(i))
473 		return 0;
474 
475 	bytes = push_pipe(i, bytes, &i_head, &r);
476 	while (bytes) {
477 		size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
478 		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
479 		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
480 		kunmap_local(p);
481 		i->head = i_head;
482 		i->iov_offset = r + chunk;
483 		bytes -= chunk;
484 		off += chunk;
485 		r = 0;
486 		i_head++;
487 	}
488 	*sump = sum;
489 	i->count -= off;
490 	return off;
491 }
492 
493 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
494 {
495 	if (unlikely(iov_iter_is_pipe(i)))
496 		return copy_pipe_to_iter(addr, bytes, i);
497 	if (iter_is_iovec(i))
498 		might_fault();
499 	iterate_and_advance(i, bytes, base, len, off,
500 		copyout(base, addr + off, len),
501 		memcpy(base, addr + off, len)
502 	)
503 
504 	return bytes;
505 }
506 EXPORT_SYMBOL(_copy_to_iter);
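
/*
 * Example usage sketch: _copy_to_iter() is normally reached through the
 * copy_to_iter() wrapper in <linux/uio.h>.  A minimal ->read_iter()-style
 * helper returning kernel data to whatever the iterator describes might look
 * like this (struct demo_dev and its fields are hypothetical):
 *
 *	static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct demo_dev *dev = iocb->ki_filp->private_data;
 *		size_t n = min(iov_iter_count(to), dev->len);
 *
 *		return copy_to_iter(dev->buf, n, to);
 *	}
 */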
507 
508 #ifdef CONFIG_ARCH_HAS_COPY_MC
509 static int copyout_mc(void __user *to, const void *from, size_t n)
510 {
511 	if (access_ok(to, n)) {
512 		instrument_copy_to_user(to, from, n);
513 		n = copy_mc_to_user((__force void *) to, from, n);
514 	}
515 	return n;
516 }
517 
518 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
519 				struct iov_iter *i)
520 {
521 	struct pipe_inode_info *pipe = i->pipe;
522 	unsigned int p_mask = pipe->ring_size - 1;
523 	unsigned int i_head;
524 	unsigned int valid = pipe->head;
525 	size_t n, off, xfer = 0;
526 
527 	if (!sanity(i))
528 		return 0;
529 
530 	n = push_pipe(i, bytes, &i_head, &off);
531 	while (n) {
532 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
533 		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
534 		unsigned long rem;
535 		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
536 		chunk -= rem;
537 		kunmap_local(p);
538 		if (chunk) {
539 			i->head = i_head;
540 			i->iov_offset = off + chunk;
541 			xfer += chunk;
542 			valid = i_head + 1;
543 		}
544 		if (rem) {
545 			pipe->bufs[i_head & p_mask].len -= rem;
546 			pipe_discard_from(pipe, valid);
547 			break;
548 		}
549 		n -= chunk;
550 		off = 0;
551 		i_head++;
552 	}
553 	i->count -= xfer;
554 	return xfer;
555 }
556 
557 /**
558  * _copy_mc_to_iter - copy to iter with source memory error exception handling
559  * @addr: source kernel address
560  * @bytes: total transfer length
561  * @i: destination iterator
562  *
563  * The pmem driver deploys this for the dax operation
564  * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
565  * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
566  * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
567  *
568  * The main differences between this and typical _copy_to_iter() are:
569  *
570  * * Typical tail/residue handling after a fault retries the copy
571  *   byte-by-byte until the fault happens again. Re-triggering machine
572  *   checks is potentially fatal so the implementation uses source
573  *   alignment and poison alignment assumptions to avoid re-triggering
574  *   hardware exceptions.
575  *
576  * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
577  *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
578  *   a short copy.
579  *
580  * Return: number of bytes copied (may be %0)
581  */
582 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
583 {
584 	if (unlikely(iov_iter_is_pipe(i)))
585 		return copy_mc_pipe_to_iter(addr, bytes, i);
586 	if (iter_is_iovec(i))
587 		might_fault();
588 	__iterate_and_advance(i, bytes, base, len, off,
589 		copyout_mc(base, addr + off, len),
590 		copy_mc_to_kernel(base, addr + off, len)
591 	)
592 
593 	return bytes;
594 }
595 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
596 #endif /* CONFIG_ARCH_HAS_COPY_MC */
597 
598 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
599 {
600 	if (unlikely(iov_iter_is_pipe(i))) {
601 		WARN_ON(1);
602 		return 0;
603 	}
604 	if (iter_is_iovec(i))
605 		might_fault();
606 	iterate_and_advance(i, bytes, base, len, off,
607 		copyin(addr + off, base, len),
608 		memcpy(addr + off, base, len)
609 	)
610 
611 	return bytes;
612 }
613 EXPORT_SYMBOL(_copy_from_iter);
614 
615 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
616 {
617 	if (unlikely(iov_iter_is_pipe(i))) {
618 		WARN_ON(1);
619 		return 0;
620 	}
621 	iterate_and_advance(i, bytes, base, len, off,
622 		__copy_from_user_inatomic_nocache(addr + off, base, len),
623 		memcpy(addr + off, base, len)
624 	)
625 
626 	return bytes;
627 }
628 EXPORT_SYMBOL(_copy_from_iter_nocache);
629 
630 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
631 /**
632  * _copy_from_iter_flushcache - write destination through cpu cache
633  * @addr: destination kernel address
634  * @bytes: total transfer length
635  * @i: source iterator
636  *
637  * The pmem driver arranges for filesystem-dax to use this facility via
638  * dax_copy_from_iter() for ensuring that writes to persistent memory
639  * are flushed through the CPU cache. It is differentiated from
640  * _copy_from_iter_nocache() in that it guarantees all data is flushed for
641  * all iterator types. _copy_from_iter_nocache() only attempts to
642  * bypass the cache for the ITER_IOVEC case, and on some archs may use
643  * instructions that strand dirty-data in the cache.
644  *
645  * Return: number of bytes copied (may be %0)
646  */
647 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
648 {
649 	if (unlikely(iov_iter_is_pipe(i))) {
650 		WARN_ON(1);
651 		return 0;
652 	}
653 	iterate_and_advance(i, bytes, base, len, off,
654 		__copy_from_user_flushcache(addr + off, base, len),
655 		memcpy_flushcache(addr + off, base, len)
656 	)
657 
658 	return bytes;
659 }
660 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
661 #endif
662 
663 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
664 {
665 	struct page *head;
666 	size_t v = n + offset;
667 
668 	/*
669 	 * The general case needs to access the page order in order
670 	 * to compute the page size.
671 	 * However, we mostly deal with order-0 pages and thus can
672 	 * avoid a possible cache line miss for requests that fit all
673 	 * page orders.
674 	 */
675 	if (n <= v && v <= PAGE_SIZE)
676 		return true;
677 
678 	head = compound_head(page);
679 	v += (page - head) << PAGE_SHIFT;
680 
681 	if (likely(n <= v && v <= (page_size(head))))
682 		return true;
683 	WARN_ON(1);
684 	return false;
685 }
686 
687 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
688 			 struct iov_iter *i)
689 {
690 	if (unlikely(iov_iter_is_pipe(i))) {
691 		return copy_page_to_iter_pipe(page, offset, bytes, i);
692 	} else {
693 		void *kaddr = kmap_local_page(page);
694 		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
695 		kunmap_local(kaddr);
696 		return wanted;
697 	}
698 }
699 
700 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
701 			 struct iov_iter *i)
702 {
703 	size_t res = 0;
704 	if (unlikely(!page_copy_sane(page, offset, bytes)))
705 		return 0;
706 	page += offset / PAGE_SIZE; // first subpage
707 	offset %= PAGE_SIZE;
708 	while (1) {
709 		size_t n = __copy_page_to_iter(page, offset,
710 				min(bytes, (size_t)PAGE_SIZE - offset), i);
711 		res += n;
712 		bytes -= n;
713 		if (!bytes || !n)
714 			break;
715 		offset += n;
716 		if (offset == PAGE_SIZE) {
717 			page++;
718 			offset = 0;
719 		}
720 	}
721 	return res;
722 }
723 EXPORT_SYMBOL(copy_page_to_iter);
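
/*
 * Example usage sketch: a buffered-read loop copies each page-cache page into
 * the destination iterator and stops on a short copy (pos, to and copied are
 * hypothetical locals):
 *
 *	size_t n = copy_page_to_iter(page, offset_in_page(pos),
 *				     min_t(size_t, PAGE_SIZE - offset_in_page(pos),
 *					   iov_iter_count(to)), to);
 *	pos += n;
 *	copied += n;
 *	if (!n)
 *		return copied ? copied : -EFAULT;
 */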
724 
725 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
726 			 struct iov_iter *i)
727 {
728 	if (page_copy_sane(page, offset, bytes)) {
729 		void *kaddr = kmap_local_page(page);
730 		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
731 		kunmap_local(kaddr);
732 		return wanted;
733 	}
734 	return 0;
735 }
736 EXPORT_SYMBOL(copy_page_from_iter);
737 
738 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
739 {
740 	struct pipe_inode_info *pipe = i->pipe;
741 	unsigned int p_mask = pipe->ring_size - 1;
742 	unsigned int i_head;
743 	size_t n, off;
744 
745 	if (!sanity(i))
746 		return 0;
747 
748 	bytes = n = push_pipe(i, bytes, &i_head, &off);
749 	if (unlikely(!n))
750 		return 0;
751 
752 	do {
753 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
754 		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
755 		memset(p + off, 0, chunk);
756 		kunmap_local(p);
757 		i->head = i_head;
758 		i->iov_offset = off + chunk;
759 		n -= chunk;
760 		off = 0;
761 		i_head++;
762 	} while (n);
763 	i->count -= bytes;
764 	return bytes;
765 }
766 
767 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
768 {
769 	if (unlikely(iov_iter_is_pipe(i)))
770 		return pipe_zero(bytes, i);
771 	iterate_and_advance(i, bytes, base, len, count,
772 		clear_user(base, len),
773 		memset(base, 0, len)
774 	)
775 
776 	return bytes;
777 }
778 EXPORT_SYMBOL(iov_iter_zero);
779 
780 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
781 				  struct iov_iter *i)
782 {
783 	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
784 	if (unlikely(!page_copy_sane(page, offset, bytes))) {
785 		kunmap_atomic(kaddr);
786 		return 0;
787 	}
788 	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
789 		kunmap_atomic(kaddr);
790 		WARN_ON(1);
791 		return 0;
792 	}
793 	iterate_and_advance(i, bytes, base, len, off,
794 		copyin(p + off, base, len),
795 		memcpy(p + off, base, len)
796 	)
797 	kunmap_atomic(kaddr);
798 	return bytes;
799 }
800 EXPORT_SYMBOL(copy_page_from_iter_atomic);
801 
802 static inline void pipe_truncate(struct iov_iter *i)
803 {
804 	struct pipe_inode_info *pipe = i->pipe;
805 	unsigned int p_tail = pipe->tail;
806 	unsigned int p_head = pipe->head;
807 	unsigned int p_mask = pipe->ring_size - 1;
808 
809 	if (!pipe_empty(p_head, p_tail)) {
810 		struct pipe_buffer *buf;
811 		unsigned int i_head = i->head;
812 		size_t off = i->iov_offset;
813 
814 		if (off) {
815 			buf = &pipe->bufs[i_head & p_mask];
816 			buf->len = off - buf->offset;
817 			i_head++;
818 		}
819 		while (p_head != i_head) {
820 			p_head--;
821 			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
822 		}
823 
824 		pipe->head = p_head;
825 	}
826 }
827 
828 static void pipe_advance(struct iov_iter *i, size_t size)
829 {
830 	struct pipe_inode_info *pipe = i->pipe;
831 	if (size) {
832 		struct pipe_buffer *buf;
833 		unsigned int p_mask = pipe->ring_size - 1;
834 		unsigned int i_head = i->head;
835 		size_t off = i->iov_offset, left = size;
836 
837 		if (off) /* make it relative to the beginning of buffer */
838 			left += off - pipe->bufs[i_head & p_mask].offset;
839 		while (1) {
840 			buf = &pipe->bufs[i_head & p_mask];
841 			if (left <= buf->len)
842 				break;
843 			left -= buf->len;
844 			i_head++;
845 		}
846 		i->head = i_head;
847 		i->iov_offset = buf->offset + left;
848 	}
849 	i->count -= size;
850 	/* ... and discard everything past that point */
851 	pipe_truncate(i);
852 }
853 
854 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
855 {
856 	const struct bio_vec *bvec, *end;
857 
858 	if (!i->count)
859 		return;
860 	i->count -= size;
861 
862 	size += i->iov_offset;
863 
864 	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
865 		if (likely(size < bvec->bv_len))
866 			break;
867 		size -= bvec->bv_len;
868 	}
869 	i->iov_offset = size;
870 	i->nr_segs -= bvec - i->bvec;
871 	i->bvec = bvec;
872 }
873 
874 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
875 {
876 	const struct iovec *iov, *end;
877 
878 	if (!i->count)
879 		return;
880 	i->count -= size;
881 
882 	size += i->iov_offset; // from beginning of current segment
883 	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
884 		if (likely(size < iov->iov_len))
885 			break;
886 		size -= iov->iov_len;
887 	}
888 	i->iov_offset = size;
889 	i->nr_segs -= iov - i->iov;
890 	i->iov = iov;
891 }
892 
893 void iov_iter_advance(struct iov_iter *i, size_t size)
894 {
895 	if (unlikely(i->count < size))
896 		size = i->count;
897 	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
898 		/* iovec and kvec have identical layouts */
899 		iov_iter_iovec_advance(i, size);
900 	} else if (iov_iter_is_bvec(i)) {
901 		iov_iter_bvec_advance(i, size);
902 	} else if (iov_iter_is_pipe(i)) {
903 		pipe_advance(i, size);
904 	} else if (unlikely(iov_iter_is_xarray(i))) {
905 		i->iov_offset += size;
906 		i->count -= size;
907 	} else if (iov_iter_is_discard(i)) {
908 		i->count -= size;
909 	}
910 }
911 EXPORT_SYMBOL(iov_iter_advance);
912 
913 void iov_iter_revert(struct iov_iter *i, size_t unroll)
914 {
915 	if (!unroll)
916 		return;
917 	if (WARN_ON(unroll > MAX_RW_COUNT))
918 		return;
919 	i->count += unroll;
920 	if (unlikely(iov_iter_is_pipe(i))) {
921 		struct pipe_inode_info *pipe = i->pipe;
922 		unsigned int p_mask = pipe->ring_size - 1;
923 		unsigned int i_head = i->head;
924 		size_t off = i->iov_offset;
925 		while (1) {
926 			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
927 			size_t n = off - b->offset;
928 			if (unroll < n) {
929 				off -= unroll;
930 				break;
931 			}
932 			unroll -= n;
933 			if (!unroll && i_head == i->start_head) {
934 				off = 0;
935 				break;
936 			}
937 			i_head--;
938 			b = &pipe->bufs[i_head & p_mask];
939 			off = b->offset + b->len;
940 		}
941 		i->iov_offset = off;
942 		i->head = i_head;
943 		pipe_truncate(i);
944 		return;
945 	}
946 	if (unlikely(iov_iter_is_discard(i)))
947 		return;
948 	if (unroll <= i->iov_offset) {
949 		i->iov_offset -= unroll;
950 		return;
951 	}
952 	unroll -= i->iov_offset;
953 	if (iov_iter_is_xarray(i)) {
954 		BUG(); /* We should never go beyond the start of the specified
955 			* range since we might then be straying into pages that
956 			* aren't pinned.
957 			*/
958 	} else if (iov_iter_is_bvec(i)) {
959 		const struct bio_vec *bvec = i->bvec;
960 		while (1) {
961 			size_t n = (--bvec)->bv_len;
962 			i->nr_segs++;
963 			if (unroll <= n) {
964 				i->bvec = bvec;
965 				i->iov_offset = n - unroll;
966 				return;
967 			}
968 			unroll -= n;
969 		}
970 	} else { /* same logics for iovec and kvec */
971 		const struct iovec *iov = i->iov;
972 		while (1) {
973 			size_t n = (--iov)->iov_len;
974 			i->nr_segs++;
975 			if (unroll <= n) {
976 				i->iov = iov;
977 				i->iov_offset = n - unroll;
978 				return;
979 			}
980 			unroll -= n;
981 		}
982 	}
983 }
984 EXPORT_SYMBOL(iov_iter_revert);
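
/*
 * Example usage sketch: a caller that discovers after the fact that consumed
 * data is unusable (say, a checksum failure) can rewind the iterator by the
 * amount the copy advanced it (demo_checksum_ok() is hypothetical):
 *
 *	copied = copy_from_iter(kbuf, len, iter);
 *	if (!demo_checksum_ok(kbuf, copied)) {
 *		iov_iter_revert(iter, copied);
 *		return -EAGAIN;
 *	}
 */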
985 
986 /*
987  * Return the count of just the current iov_iter segment.
988  */
989 size_t iov_iter_single_seg_count(const struct iov_iter *i)
990 {
991 	if (i->nr_segs > 1) {
992 		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
993 			return min(i->count, i->iov->iov_len - i->iov_offset);
994 		if (iov_iter_is_bvec(i))
995 			return min(i->count, i->bvec->bv_len - i->iov_offset);
996 	}
997 	return i->count;
998 }
999 EXPORT_SYMBOL(iov_iter_single_seg_count);
1000 
1001 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1002 			const struct kvec *kvec, unsigned long nr_segs,
1003 			size_t count)
1004 {
1005 	WARN_ON(direction & ~(READ | WRITE));
1006 	*i = (struct iov_iter){
1007 		.iter_type = ITER_KVEC,
1008 		.data_source = direction,
1009 		.kvec = kvec,
1010 		.nr_segs = nr_segs,
1011 		.iov_offset = 0,
1012 		.count = count
1013 	};
1014 }
1015 EXPORT_SYMBOL(iov_iter_kvec);
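
/*
 * Example usage sketch: wrapping a kernel buffer so it can be handed to code
 * that consumes an iov_iter, much as kernel_read() does internally
 * (demo_do_read() is hypothetical):
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 *	ret = demo_do_read(file, &iter);
 */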
1016 
1017 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1018 			const struct bio_vec *bvec, unsigned long nr_segs,
1019 			size_t count)
1020 {
1021 	WARN_ON(direction & ~(READ | WRITE));
1022 	*i = (struct iov_iter){
1023 		.iter_type = ITER_BVEC,
1024 		.data_source = direction,
1025 		.bvec = bvec,
1026 		.nr_segs = nr_segs,
1027 		.iov_offset = 0,
1028 		.count = count
1029 	};
1030 }
1031 EXPORT_SYMBOL(iov_iter_bvec);
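
/*
 * Example usage sketch: describing page-based data, e.g. the way the loop
 * driver feeds a backing file's ->read_iter()/->write_iter() (the kiocb
 * setup is omitted and the names are hypothetical):
 *
 *	struct bio_vec bv = {
 *		.bv_page	= page,
 *		.bv_offset	= off,
 *		.bv_len		= len,
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, WRITE, &bv, 1, len);
 *	ret = call_write_iter(file, &kiocb, &iter);
 */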
1032 
1033 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1034 			struct pipe_inode_info *pipe,
1035 			size_t count)
1036 {
1037 	BUG_ON(direction != READ);
1038 	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1039 	*i = (struct iov_iter){
1040 		.iter_type = ITER_PIPE,
1041 		.data_source = false,
1042 		.pipe = pipe,
1043 		.head = pipe->head,
1044 		.start_head = pipe->head,
1045 		.iov_offset = 0,
1046 		.count = count
1047 	};
1048 }
1049 EXPORT_SYMBOL(iov_iter_pipe);
1050 
1051 /**
1052  * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1053  * @i: The iterator to initialise.
1054  * @direction: The direction of the transfer.
1055  * @xarray: The xarray to access.
1056  * @start: The start file position.
1057  * @count: The size of the I/O buffer in bytes.
1058  *
1059  * Set up an I/O iterator to either draw data out of the pages attached to an
1060  * inode or to inject data into those pages.  The pages *must* be prevented
1061  * from evaporation, either by taking a ref on them or locking them by the
1062  * caller.
1063  */
1064 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1065 		     struct xarray *xarray, loff_t start, size_t count)
1066 {
1067 	BUG_ON(direction & ~1);
1068 	*i = (struct iov_iter) {
1069 		.iter_type = ITER_XARRAY,
1070 		.data_source = direction,
1071 		.xarray = xarray,
1072 		.xarray_start = start,
1073 		.count = count,
1074 		.iov_offset = 0
1075 	};
1076 }
1077 EXPORT_SYMBOL(iov_iter_xarray);
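
/*
 * Example usage sketch: network filesystems use this to pull data from a
 * server straight into an inode's page cache, with the pages referenced or
 * locked by the caller beforehand (demo_fetch_from_server() is hypothetical):
 *
 *	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);
 *	ret = demo_fetch_from_server(req, &iter);
 */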
1078 
1079 /**
1080  * iov_iter_discard - Initialise an I/O iterator that discards data
1081  * @i: The iterator to initialise.
1082  * @direction: The direction of the transfer.
1083  * @count: The size of the I/O buffer in bytes.
1084  *
1085  * Set up an I/O iterator that just discards everything that's written to it.
1086  * It's only available as a READ iterator.
1087  */
1088 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1089 {
1090 	BUG_ON(direction != READ);
1091 	*i = (struct iov_iter){
1092 		.iter_type = ITER_DISCARD,
1093 		.data_source = false,
1094 		.count = count,
1095 		.iov_offset = 0
1096 	};
1097 }
1098 EXPORT_SYMBOL(iov_iter_discard);
1099 
1100 static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
1101 				   unsigned len_mask)
1102 {
1103 	size_t size = i->count;
1104 	size_t skip = i->iov_offset;
1105 	unsigned k;
1106 
1107 	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1108 		size_t len = i->iov[k].iov_len - skip;
1109 
1110 		if (len > size)
1111 			len = size;
1112 		if (len & len_mask)
1113 			return false;
1114 		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
1115 			return false;
1116 
1117 		size -= len;
1118 		if (!size)
1119 			break;
1120 	}
1121 	return true;
1122 }
1123 
1124 static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
1125 				  unsigned len_mask)
1126 {
1127 	size_t size = i->count;
1128 	unsigned skip = i->iov_offset;
1129 	unsigned k;
1130 
1131 	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1132 		size_t len = i->bvec[k].bv_len - skip;
1133 
1134 		if (len > size)
1135 			len = size;
1136 		if (len & len_mask)
1137 			return false;
1138 		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
1139 			return false;
1140 
1141 		size -= len;
1142 		if (!size)
1143 			break;
1144 	}
1145 	return true;
1146 }
1147 
1148 /**
1149  * iov_iter_is_aligned() - Check if the addresses and lengths of each segments
1150  * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
1151  * 	are aligned to the parameters.
1152  * @i: &struct iov_iter to check
1153  * @addr_mask: bit mask to check against the iov element's addresses
1154  * @len_mask: bit mask to check against the iov element's lengths
1155  *
1156  * Return: false if any addresses or lengths intersect with the provided masks
1157  */
1158 bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
1159 			 unsigned len_mask)
1160 {
1161 	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1162 		return iov_iter_aligned_iovec(i, addr_mask, len_mask);
1163 
1164 	if (iov_iter_is_bvec(i))
1165 		return iov_iter_aligned_bvec(i, addr_mask, len_mask);
1166 
1167 	if (iov_iter_is_pipe(i)) {
1168 		unsigned int p_mask = i->pipe->ring_size - 1;
1169 		size_t size = i->count;
1170 
1171 		if (size & len_mask)
1172 			return false;
1173 		if (size && allocated(&i->pipe->bufs[i->head & p_mask])) {
1174 			if (i->iov_offset & addr_mask)
1175 				return false;
1176 		}
1177 
1178 		return true;
1179 	}
1180 
1181 	if (iov_iter_is_xarray(i)) {
1182 		if (i->count & len_mask)
1183 			return false;
1184 		if ((i->xarray_start + i->iov_offset) & addr_mask)
1185 			return false;
1186 	}
1187 
1188 	return true;
1189 }
1190 EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
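
/*
 * Example usage sketch: a direct-I/O path rejecting requests whose segment
 * addresses or lengths are not block aligned (the mask derivation is just one
 * plausible choice):
 *
 *	unsigned int mask = (1U << inode->i_blkbits) - 1;
 *
 *	if (!iov_iter_is_aligned(iter, mask, mask))
 *		return -EINVAL;
 */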
1191 
1192 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1193 {
1194 	unsigned long res = 0;
1195 	size_t size = i->count;
1196 	size_t skip = i->iov_offset;
1197 	unsigned k;
1198 
1199 	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1200 		size_t len = i->iov[k].iov_len - skip;
1201 		if (len) {
1202 			res |= (unsigned long)i->iov[k].iov_base + skip;
1203 			if (len > size)
1204 				len = size;
1205 			res |= len;
1206 			size -= len;
1207 			if (!size)
1208 				break;
1209 		}
1210 	}
1211 	return res;
1212 }
1213 
1214 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1215 {
1216 	unsigned res = 0;
1217 	size_t size = i->count;
1218 	unsigned skip = i->iov_offset;
1219 	unsigned k;
1220 
1221 	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1222 		size_t len = i->bvec[k].bv_len - skip;
1223 		res |= (unsigned long)i->bvec[k].bv_offset + skip;
1224 		if (len > size)
1225 			len = size;
1226 		res |= len;
1227 		size -= len;
1228 		if (!size)
1229 			break;
1230 	}
1231 	return res;
1232 }
1233 
1234 unsigned long iov_iter_alignment(const struct iov_iter *i)
1235 {
1236 	/* iovec and kvec have identical layouts */
1237 	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1238 		return iov_iter_alignment_iovec(i);
1239 
1240 	if (iov_iter_is_bvec(i))
1241 		return iov_iter_alignment_bvec(i);
1242 
1243 	if (iov_iter_is_pipe(i)) {
1244 		unsigned int p_mask = i->pipe->ring_size - 1;
1245 		size_t size = i->count;
1246 
1247 		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1248 			return size | i->iov_offset;
1249 		return size;
1250 	}
1251 
1252 	if (iov_iter_is_xarray(i))
1253 		return (i->xarray_start + i->iov_offset) | i->count;
1254 
1255 	return 0;
1256 }
1257 EXPORT_SYMBOL(iov_iter_alignment);
1258 
1259 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1260 {
1261 	unsigned long res = 0;
1262 	unsigned long v = 0;
1263 	size_t size = i->count;
1264 	unsigned k;
1265 
1266 	if (WARN_ON(!iter_is_iovec(i)))
1267 		return ~0U;
1268 
1269 	for (k = 0; k < i->nr_segs; k++) {
1270 		if (i->iov[k].iov_len) {
1271 			unsigned long base = (unsigned long)i->iov[k].iov_base;
1272 			if (v) // if not the first one
1273 				res |= base | v; // this start | previous end
1274 			v = base + i->iov[k].iov_len;
1275 			if (size <= i->iov[k].iov_len)
1276 				break;
1277 			size -= i->iov[k].iov_len;
1278 		}
1279 	}
1280 	return res;
1281 }
1282 EXPORT_SYMBOL(iov_iter_gap_alignment);
1283 
1284 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1285 				size_t maxsize,
1286 				struct page **pages,
1287 				int iter_head,
1288 				size_t *start)
1289 {
1290 	struct pipe_inode_info *pipe = i->pipe;
1291 	unsigned int p_mask = pipe->ring_size - 1;
1292 	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1293 	if (!n)
1294 		return -EFAULT;
1295 
1296 	maxsize = n;
1297 	n += *start;
1298 	while (n > 0) {
1299 		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1300 		iter_head++;
1301 		n -= PAGE_SIZE;
1302 	}
1303 
1304 	return maxsize;
1305 }
1306 
1307 static ssize_t pipe_get_pages(struct iov_iter *i,
1308 		   struct page **pages, size_t maxsize, unsigned maxpages,
1309 		   size_t *start)
1310 {
1311 	unsigned int iter_head, npages;
1312 	size_t capacity;
1313 
1314 	if (!sanity(i))
1315 		return -EFAULT;
1316 
1317 	data_start(i, &iter_head, start);
1318 	/* Amount of free space: some of this one + all after this one */
1319 	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1320 	capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1321 
1322 	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1323 }
1324 
1325 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1326 					  pgoff_t index, unsigned int nr_pages)
1327 {
1328 	XA_STATE(xas, xa, index);
1329 	struct page *page;
1330 	unsigned int ret = 0;
1331 
1332 	rcu_read_lock();
1333 	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1334 		if (xas_retry(&xas, page))
1335 			continue;
1336 
1337 		/* Has the page moved or been split? */
1338 		if (unlikely(page != xas_reload(&xas))) {
1339 			xas_reset(&xas);
1340 			continue;
1341 		}
1342 
1343 		pages[ret] = find_subpage(page, xas.xa_index);
1344 		get_page(pages[ret]);
1345 		if (++ret == nr_pages)
1346 			break;
1347 	}
1348 	rcu_read_unlock();
1349 	return ret;
1350 }
1351 
1352 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1353 				     struct page **pages, size_t maxsize,
1354 				     unsigned maxpages, size_t *_start_offset)
1355 {
1356 	unsigned nr, offset;
1357 	pgoff_t index, count;
1358 	size_t size = maxsize;
1359 	loff_t pos;
1360 
1361 	if (!size || !maxpages)
1362 		return 0;
1363 
1364 	pos = i->xarray_start + i->iov_offset;
1365 	index = pos >> PAGE_SHIFT;
1366 	offset = pos & ~PAGE_MASK;
1367 	*_start_offset = offset;
1368 
1369 	count = 1;
1370 	if (size > PAGE_SIZE - offset) {
1371 		size -= PAGE_SIZE - offset;
1372 		count += size >> PAGE_SHIFT;
1373 		size &= ~PAGE_MASK;
1374 		if (size)
1375 			count++;
1376 	}
1377 
1378 	if (count > maxpages)
1379 		count = maxpages;
1380 
1381 	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1382 	if (nr == 0)
1383 		return 0;
1384 
1385 	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1386 }
1387 
1388 /* must be done on non-empty ITER_IOVEC one */
1389 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1390 {
1391 	size_t skip;
1392 	long k;
1393 
1394 	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1395 		size_t len = i->iov[k].iov_len - skip;
1396 
1397 		if (unlikely(!len))
1398 			continue;
1399 		if (*size > len)
1400 			*size = len;
1401 		return (unsigned long)i->iov[k].iov_base + skip;
1402 	}
1403 	BUG(); // if it had been empty, we wouldn't get called
1404 }
1405 
1406 /* must be done on non-empty ITER_BVEC one */
1407 static struct page *first_bvec_segment(const struct iov_iter *i,
1408 				       size_t *size, size_t *start)
1409 {
1410 	struct page *page;
1411 	size_t skip = i->iov_offset, len;
1412 
1413 	len = i->bvec->bv_len - skip;
1414 	if (*size > len)
1415 		*size = len;
1416 	skip += i->bvec->bv_offset;
1417 	page = i->bvec->bv_page + skip / PAGE_SIZE;
1418 	*start = skip % PAGE_SIZE;
1419 	return page;
1420 }
1421 
1422 ssize_t iov_iter_get_pages(struct iov_iter *i,
1423 		   struct page **pages, size_t maxsize, unsigned maxpages,
1424 		   size_t *start)
1425 {
1426 	int n, res;
1427 
1428 	if (maxsize > i->count)
1429 		maxsize = i->count;
1430 	if (!maxsize)
1431 		return 0;
1432 	if (maxsize > MAX_RW_COUNT)
1433 		maxsize = MAX_RW_COUNT;
1434 
1435 	if (likely(iter_is_iovec(i))) {
1436 		unsigned int gup_flags = 0;
1437 		unsigned long addr;
1438 
1439 		if (iov_iter_rw(i) != WRITE)
1440 			gup_flags |= FOLL_WRITE;
1441 		if (i->nofault)
1442 			gup_flags |= FOLL_NOFAULT;
1443 
1444 		addr = first_iovec_segment(i, &maxsize);
1445 		*start = addr % PAGE_SIZE;
1446 		addr &= PAGE_MASK;
1447 		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1448 		if (n > maxpages)
1449 			n = maxpages;
1450 		res = get_user_pages_fast(addr, n, gup_flags, pages);
1451 		if (unlikely(res <= 0))
1452 			return res;
1453 		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1454 	}
1455 	if (iov_iter_is_bvec(i)) {
1456 		struct page *page;
1457 
1458 		page = first_bvec_segment(i, &maxsize, start);
1459 		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1460 		if (n > maxpages)
1461 			n = maxpages;
1462 		for (int k = 0; k < n; k++)
1463 			get_page(*pages++ = page++);
1464 		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1465 	}
1466 	if (iov_iter_is_pipe(i))
1467 		return pipe_get_pages(i, pages, maxsize, maxpages, start);
1468 	if (iov_iter_is_xarray(i))
1469 		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1470 	return -EFAULT;
1471 }
1472 EXPORT_SYMBOL(iov_iter_get_pages);
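
/*
 * Example usage sketch: pinning the first part of an iterator, e.g. for DMA.
 * The call does not advance the iterator, so the caller advances it (and
 * drops the page references) itself; the array size here is arbitrary:
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t bytes;
 *	int k, npages;
 *
 *	bytes = iov_iter_get_pages(iter, pages, maxsize, ARRAY_SIZE(pages), &off);
 *	if (bytes <= 0)
 *		return bytes;
 *	npages = DIV_ROUND_UP(off + bytes, PAGE_SIZE);
 *	...use the data, which starts at offset "off" within pages[0]...
 *	for (k = 0; k < npages; k++)
 *		put_page(pages[k]);
 *	iov_iter_advance(iter, bytes);
 */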
1473 
1474 static struct page **get_pages_array(size_t n)
1475 {
1476 	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1477 }
1478 
1479 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1480 		   struct page ***pages, size_t maxsize,
1481 		   size_t *start)
1482 {
1483 	struct page **p;
1484 	unsigned int iter_head, npages;
1485 	ssize_t n;
1486 
1487 	if (!sanity(i))
1488 		return -EFAULT;
1489 
1490 	data_start(i, &iter_head, start);
1491 	/* Amount of free space: some of this one + all after this one */
1492 	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1493 	n = npages * PAGE_SIZE - *start;
1494 	if (maxsize > n)
1495 		maxsize = n;
1496 	else
1497 		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1498 	p = get_pages_array(npages);
1499 	if (!p)
1500 		return -ENOMEM;
1501 	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1502 	if (n > 0)
1503 		*pages = p;
1504 	else
1505 		kvfree(p);
1506 	return n;
1507 }
1508 
1509 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1510 					   struct page ***pages, size_t maxsize,
1511 					   size_t *_start_offset)
1512 {
1513 	struct page **p;
1514 	unsigned nr, offset;
1515 	pgoff_t index, count;
1516 	size_t size = maxsize;
1517 	loff_t pos;
1518 
1519 	if (!size)
1520 		return 0;
1521 
1522 	pos = i->xarray_start + i->iov_offset;
1523 	index = pos >> PAGE_SHIFT;
1524 	offset = pos & ~PAGE_MASK;
1525 	*_start_offset = offset;
1526 
1527 	count = 1;
1528 	if (size > PAGE_SIZE - offset) {
1529 		size -= PAGE_SIZE - offset;
1530 		count += size >> PAGE_SHIFT;
1531 		size &= ~PAGE_MASK;
1532 		if (size)
1533 			count++;
1534 	}
1535 
1536 	p = get_pages_array(count);
1537 	if (!p)
1538 		return -ENOMEM;
1539 	*pages = p;
1540 
1541 	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1542 	if (nr == 0)
1543 		return 0;
1544 
1545 	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1546 }
1547 
1548 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1549 		   struct page ***pages, size_t maxsize,
1550 		   size_t *start)
1551 {
1552 	struct page **p;
1553 	int n, res;
1554 
1555 	if (maxsize > i->count)
1556 		maxsize = i->count;
1557 	if (!maxsize)
1558 		return 0;
1559 	if (maxsize > MAX_RW_COUNT)
1560 		maxsize = MAX_RW_COUNT;
1561 
1562 	if (likely(iter_is_iovec(i))) {
1563 		unsigned int gup_flags = 0;
1564 		unsigned long addr;
1565 
1566 		if (iov_iter_rw(i) != WRITE)
1567 			gup_flags |= FOLL_WRITE;
1568 		if (i->nofault)
1569 			gup_flags |= FOLL_NOFAULT;
1570 
1571 		addr = first_iovec_segment(i, &maxsize);
1572 		*start = addr % PAGE_SIZE;
1573 		addr &= PAGE_MASK;
1574 		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1575 		p = get_pages_array(n);
1576 		if (!p)
1577 			return -ENOMEM;
1578 		res = get_user_pages_fast(addr, n, gup_flags, p);
1579 		if (unlikely(res <= 0)) {
1580 			kvfree(p);
1581 			*pages = NULL;
1582 			return res;
1583 		}
1584 		*pages = p;
1585 		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1586 	}
1587 	if (iov_iter_is_bvec(i)) {
1588 		struct page *page;
1589 
1590 		page = first_bvec_segment(i, &maxsize, start);
1591 		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1592 		*pages = p = get_pages_array(n);
1593 		if (!p)
1594 			return -ENOMEM;
1595 		for (int k = 0; k < n; k++)
1596 			get_page(*p++ = page++);
1597 		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1598 	}
1599 	if (iov_iter_is_pipe(i))
1600 		return pipe_get_pages_alloc(i, pages, maxsize, start);
1601 	if (iov_iter_is_xarray(i))
1602 		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1603 	return -EFAULT;
1604 }
1605 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1606 
1607 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1608 			       struct iov_iter *i)
1609 {
1610 	__wsum sum, next;
1611 	sum = *csum;
1612 	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1613 		WARN_ON(1);
1614 		return 0;
1615 	}
1616 	iterate_and_advance(i, bytes, base, len, off, ({
1617 		next = csum_and_copy_from_user(base, addr + off, len);
1618 		sum = csum_block_add(sum, next, off);
1619 		next ? 0 : len;
1620 	}), ({
1621 		sum = csum_and_memcpy(addr + off, base, len, sum, off);
1622 	})
1623 	)
1624 	*csum = sum;
1625 	return bytes;
1626 }
1627 EXPORT_SYMBOL(csum_and_copy_from_iter);
1628 
1629 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1630 			     struct iov_iter *i)
1631 {
1632 	struct csum_state *csstate = _csstate;
1633 	__wsum sum, next;
1634 
1635 	if (unlikely(iov_iter_is_discard(i))) {
1636 		WARN_ON(1);	/* for now */
1637 		return 0;
1638 	}
1639 
1640 	sum = csum_shift(csstate->csum, csstate->off);
1641 	if (unlikely(iov_iter_is_pipe(i)))
1642 		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1643 	else iterate_and_advance(i, bytes, base, len, off, ({
1644 		next = csum_and_copy_to_user(addr + off, base, len);
1645 		sum = csum_block_add(sum, next, off);
1646 		next ? 0 : len;
1647 	}), ({
1648 		sum = csum_and_memcpy(base, addr + off, len, sum, off);
1649 	})
1650 	)
1651 	csstate->csum = csum_shift(sum, csstate->off);
1652 	csstate->off += bytes;
1653 	return bytes;
1654 }
1655 EXPORT_SYMBOL(csum_and_copy_to_iter);
1656 
1657 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1658 		struct iov_iter *i)
1659 {
1660 #ifdef CONFIG_CRYPTO_HASH
1661 	struct ahash_request *hash = hashp;
1662 	struct scatterlist sg;
1663 	size_t copied;
1664 
1665 	copied = copy_to_iter(addr, bytes, i);
1666 	sg_init_one(&sg, addr, copied);
1667 	ahash_request_set_crypt(hash, &sg, NULL, copied);
1668 	crypto_ahash_update(hash);
1669 	return copied;
1670 #else
1671 	return 0;
1672 #endif
1673 }
1674 EXPORT_SYMBOL(hash_and_copy_to_iter);
1675 
1676 static int iov_npages(const struct iov_iter *i, int maxpages)
1677 {
1678 	size_t skip = i->iov_offset, size = i->count;
1679 	const struct iovec *p;
1680 	int npages = 0;
1681 
1682 	for (p = i->iov; size; skip = 0, p++) {
1683 		unsigned offs = offset_in_page(p->iov_base + skip);
1684 		size_t len = min(p->iov_len - skip, size);
1685 
1686 		if (len) {
1687 			size -= len;
1688 			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1689 			if (unlikely(npages > maxpages))
1690 				return maxpages;
1691 		}
1692 	}
1693 	return npages;
1694 }
1695 
1696 static int bvec_npages(const struct iov_iter *i, int maxpages)
1697 {
1698 	size_t skip = i->iov_offset, size = i->count;
1699 	const struct bio_vec *p;
1700 	int npages = 0;
1701 
1702 	for (p = i->bvec; size; skip = 0, p++) {
1703 		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1704 		size_t len = min(p->bv_len - skip, size);
1705 
1706 		size -= len;
1707 		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1708 		if (unlikely(npages > maxpages))
1709 			return maxpages;
1710 	}
1711 	return npages;
1712 }
1713 
1714 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1715 {
1716 	if (unlikely(!i->count))
1717 		return 0;
1718 	/* iovec and kvec have identical layouts */
1719 	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1720 		return iov_npages(i, maxpages);
1721 	if (iov_iter_is_bvec(i))
1722 		return bvec_npages(i, maxpages);
1723 	if (iov_iter_is_pipe(i)) {
1724 		unsigned int iter_head;
1725 		int npages;
1726 		size_t off;
1727 
1728 		if (!sanity(i))
1729 			return 0;
1730 
1731 		data_start(i, &iter_head, &off);
1732 		/* some of this one + all after this one */
1733 		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1734 		return min(npages, maxpages);
1735 	}
1736 	if (iov_iter_is_xarray(i)) {
1737 		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1738 		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1739 		return min(npages, maxpages);
1740 	}
1741 	return 0;
1742 }
1743 EXPORT_SYMBOL(iov_iter_npages);
1744 
1745 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1746 {
1747 	*new = *old;
1748 	if (unlikely(iov_iter_is_pipe(new))) {
1749 		WARN_ON(1);
1750 		return NULL;
1751 	}
1752 	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1753 		return NULL;
1754 	if (iov_iter_is_bvec(new))
1755 		return new->bvec = kmemdup(new->bvec,
1756 				    new->nr_segs * sizeof(struct bio_vec),
1757 				    flags);
1758 	else
1759 		/* iovec and kvec have identical layout */
1760 		return new->iov = kmemdup(new->iov,
1761 				   new->nr_segs * sizeof(struct iovec),
1762 				   flags);
1763 }
1764 EXPORT_SYMBOL(dup_iter);
1765 
1766 static int copy_compat_iovec_from_user(struct iovec *iov,
1767 		const struct iovec __user *uvec, unsigned long nr_segs)
1768 {
1769 	const struct compat_iovec __user *uiov =
1770 		(const struct compat_iovec __user *)uvec;
1771 	int ret = -EFAULT, i;
1772 
1773 	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1774 		return -EFAULT;
1775 
1776 	for (i = 0; i < nr_segs; i++) {
1777 		compat_uptr_t buf;
1778 		compat_ssize_t len;
1779 
1780 		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1781 		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1782 
1783 		/* check for compat_size_t not fitting in compat_ssize_t .. */
1784 		if (len < 0) {
1785 			ret = -EINVAL;
1786 			goto uaccess_end;
1787 		}
1788 		iov[i].iov_base = compat_ptr(buf);
1789 		iov[i].iov_len = len;
1790 	}
1791 
1792 	ret = 0;
1793 uaccess_end:
1794 	user_access_end();
1795 	return ret;
1796 }
1797 
1798 static int copy_iovec_from_user(struct iovec *iov,
1799 		const struct iovec __user *uvec, unsigned long nr_segs)
1800 {
1801 	unsigned long seg;
1802 
1803 	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1804 		return -EFAULT;
1805 	for (seg = 0; seg < nr_segs; seg++) {
1806 		if ((ssize_t)iov[seg].iov_len < 0)
1807 			return -EINVAL;
1808 	}
1809 
1810 	return 0;
1811 }
1812 
1813 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1814 		unsigned long nr_segs, unsigned long fast_segs,
1815 		struct iovec *fast_iov, bool compat)
1816 {
1817 	struct iovec *iov = fast_iov;
1818 	int ret;
1819 
1820 	/*
1821 	 * SuS says "The readv() function *may* fail if the iovcnt argument was
1822 	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
1823 	 * traditionally returned zero for zero segments, so...
1824 	 */
1825 	if (nr_segs == 0)
1826 		return iov;
1827 	if (nr_segs > UIO_MAXIOV)
1828 		return ERR_PTR(-EINVAL);
1829 	if (nr_segs > fast_segs) {
1830 		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1831 		if (!iov)
1832 			return ERR_PTR(-ENOMEM);
1833 	}
1834 
1835 	if (compat)
1836 		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1837 	else
1838 		ret = copy_iovec_from_user(iov, uvec, nr_segs);
1839 	if (ret) {
1840 		if (iov != fast_iov)
1841 			kfree(iov);
1842 		return ERR_PTR(ret);
1843 	}
1844 
1845 	return iov;
1846 }
1847 
1848 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1849 		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1850 		 struct iov_iter *i, bool compat)
1851 {
1852 	ssize_t total_len = 0;
1853 	unsigned long seg;
1854 	struct iovec *iov;
1855 
1856 	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1857 	if (IS_ERR(iov)) {
1858 		*iovp = NULL;
1859 		return PTR_ERR(iov);
1860 	}
1861 
1862 	/*
1863 	 * According to the Single Unix Specification we should return EINVAL if
1864 	 * an element length is < 0 when cast to ssize_t or if the total length
1865 	 * would overflow the ssize_t return value of the system call.
1866 	 *
1867 	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1868 	 * overflow case.
1869 	 */
1870 	for (seg = 0; seg < nr_segs; seg++) {
1871 		ssize_t len = (ssize_t)iov[seg].iov_len;
1872 
1873 		if (!access_ok(iov[seg].iov_base, len)) {
1874 			if (iov != *iovp)
1875 				kfree(iov);
1876 			*iovp = NULL;
1877 			return -EFAULT;
1878 		}
1879 
1880 		if (len > MAX_RW_COUNT - total_len) {
1881 			len = MAX_RW_COUNT - total_len;
1882 			iov[seg].iov_len = len;
1883 		}
1884 		total_len += len;
1885 	}
1886 
1887 	iov_iter_init(i, type, iov, nr_segs, total_len);
1888 	if (iov == *iovp)
1889 		*iovp = NULL;
1890 	else
1891 		*iovp = iov;
1892 	return total_len;
1893 }
1894 
1895 /**
1896  * import_iovec() - Copy an array of &struct iovec from userspace
1897  *     into the kernel, check that it is valid, and initialize a new
1898  *     &struct iov_iter iterator to access it.
1899  *
1900  * @type: One of %READ or %WRITE.
1901  * @uvec: Pointer to the userspace array.
1902  * @nr_segs: Number of elements in userspace array.
1903  * @fast_segs: Number of elements in *@iovp.
1904  * @iovp: (input and output parameter) Pointer to pointer to (usually small
1905  *     on-stack) kernel array.
1906  * @i: Pointer to iterator that will be initialized on success.
1907  *
1908  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1909  * then this function places %NULL in *@iovp on return. Otherwise, a new
1910  * array will be allocated and the result placed in *@iovp. This means that
1911  * the caller may call kfree() on *@iovp regardless of whether the small
1912  * on-stack array was used or not (and regardless of whether this function
1913  * returns an error or not).
1914  *
1915  * Return: Negative error code on error, bytes imported on success
1916  */
1917 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1918 		 unsigned nr_segs, unsigned fast_segs,
1919 		 struct iovec **iovp, struct iov_iter *i)
1920 {
1921 	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1922 			      in_compat_syscall());
1923 }
1924 EXPORT_SYMBOL(import_iovec);
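
/*
 * Example usage sketch: the classic readv()-style pattern, with a small
 * on-stack array that import_iovec() replaces by a heap allocation only when
 * needed (demo_do_read() is hypothetical):
 *
 *	struct iovec iovstack[UIO_FASTIOV];
 *	struct iovec *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = demo_do_read(file, &iter);
 *	kfree(iov);	(NULL when the stack array was used, so always safe)
 *	return ret;
 */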
1925 
1926 int import_single_range(int rw, void __user *buf, size_t len,
1927 		 struct iovec *iov, struct iov_iter *i)
1928 {
1929 	if (len > MAX_RW_COUNT)
1930 		len = MAX_RW_COUNT;
1931 	if (unlikely(!access_ok(buf, len)))
1932 		return -EFAULT;
1933 
1934 	iov->iov_base = buf;
1935 	iov->iov_len = len;
1936 	iov_iter_init(i, rw, iov, 1, len);
1937 	return 0;
1938 }
1939 EXPORT_SYMBOL(import_single_range);
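
/*
 * Example usage sketch: the single-buffer counterpart, as read(2)/write(2)
 * style paths use it (demo_do_write() is hypothetical):
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_single_range(WRITE, ubuf, len, &iov, &iter);
 *	if (ret)
 *		return ret;
 *	return demo_do_write(file, &iter);
 */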
1940 
1941 /**
1942  * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1943  *     iov_iter_save_state() was called.
1944  *
1945  * @i: &struct iov_iter to restore
1946  * @state: state to restore from
1947  *
1948  * Used after iov_iter_save_state() to restore @i, if operations may
1949  * have advanced it.
1950  *
1951  * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
1952  */
1953 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
1954 {
1955 	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
1956 			 !iov_iter_is_kvec(i)))
1957 		return;
1958 	i->iov_offset = state->iov_offset;
1959 	i->count = state->count;
1960 	/*
1961 	 * For the *vec iters, nr_segs + iov is constant - if we increment
1962 	 * the vec, then we also decrement the nr_segs count. Hence we don't
1963 	 * need to track both of these, just one is enough and we can deduct
1964 	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
1965 	 * size, so we can just increment the iov pointer as they are unionized.
1966 	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
1967 	 * not. Be safe and handle it separately.
1968 	 */
1969 	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
1970 	if (iov_iter_is_bvec(i))
1971 		i->bvec -= state->nr_segs - i->nr_segs;
1972 	else
1973 		i->iov -= state->nr_segs - i->nr_segs;
1974 	i->nr_segs = state->nr_segs;
1975 }
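
/*
 * Example usage sketch: pairing iov_iter_restore() with iov_iter_save_state()
 * so a failed or partial attempt can be retried from the original position
 * (demo_attempt_io() is hypothetical):
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = demo_attempt_io(iter);
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);
 *		...queue the request for a blocking retry...
 *	}
 */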
1976