xref: /openbmc/linux/lib/iov_iter.c (revision dd45ab9d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
6 #include <linux/uio.h>
7 #include <linux/pagemap.h>
8 #include <linux/highmem.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 #include <linux/splice.h>
12 #include <linux/compat.h>
13 #include <net/checksum.h>
14 #include <linux/scatterlist.h>
15 #include <linux/instrumented.h>
16 
17 #define PIPE_PARANOIA /* for now */
18 
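/*
 * All of the iterate_* helpers below follow the same convention: for each
 * contiguous chunk of the iterator they set up a pointer (base) and a
 * length (len) and then evaluate the caller-supplied STEP expression on
 * that chunk.  STEP must evaluate to the number of bytes it failed to
 * process (0 on complete success); the helpers subtract that from len,
 * stop early after a short step, and finally set n to the total number
 * of bytes processed while updating i->iov_offset to match.
 */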
19 /* covers iovec and kvec alike */
20 #define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
21 	size_t off = 0;						\
22 	size_t skip = i->iov_offset;				\
23 	do {							\
24 		len = min(n, __p->iov_len - skip);		\
25 		if (likely(len)) {				\
26 			base = __p->iov_base + skip;		\
27 			len -= (STEP);				\
28 			off += len;				\
29 			skip += len;				\
30 			n -= len;				\
31 			if (skip < __p->iov_len)		\
32 				break;				\
33 		}						\
34 		__p++;						\
35 		skip = 0;					\
36 	} while (n);						\
37 	i->iov_offset = skip;					\
38 	n = off;						\
39 }
40 
41 #define iterate_bvec(i, n, base, len, off, p, STEP) {		\
42 	size_t off = 0;						\
43 	unsigned skip = i->iov_offset;				\
44 	while (n) {						\
45 		unsigned offset = p->bv_offset + skip;		\
46 		unsigned left;					\
47 		void *kaddr = kmap_local_page(p->bv_page +	\
48 					offset / PAGE_SIZE);	\
49 		base = kaddr + offset % PAGE_SIZE;		\
50 		len = min(min(n, (size_t)(p->bv_len - skip)),	\
51 		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
52 		left = (STEP);					\
53 		kunmap_local(kaddr);				\
54 		len -= left;					\
55 		off += len;					\
56 		skip += len;					\
57 		if (skip == p->bv_len) {			\
58 			skip = 0;				\
59 			p++;					\
60 		}						\
61 		n -= len;					\
62 		if (left)					\
63 			break;					\
64 	}							\
65 	i->iov_offset = skip;					\
66 	n = off;						\
67 }
68 
69 #define iterate_xarray(i, n, base, len, __off, STEP) {		\
70 	__label__ __out;					\
71 	size_t __off = 0;					\
72 	struct folio *folio;					\
73 	loff_t start = i->xarray_start + i->iov_offset;		\
74 	pgoff_t index = start / PAGE_SIZE;			\
75 	XA_STATE(xas, i->xarray, index);			\
76 								\
77 	len = PAGE_SIZE - offset_in_page(start);		\
78 	rcu_read_lock();					\
79 	xas_for_each(&xas, folio, ULONG_MAX) {			\
80 		unsigned left;					\
81 		size_t offset;					\
82 		if (xas_retry(&xas, folio))			\
83 			continue;				\
84 		if (WARN_ON(xa_is_value(folio)))		\
85 			break;					\
86 		if (WARN_ON(folio_test_hugetlb(folio)))		\
87 			break;					\
88 		offset = offset_in_folio(folio, start + __off);	\
89 		while (offset < folio_size(folio)) {		\
90 			base = kmap_local_folio(folio, offset);	\
91 			len = min(n, len);			\
92 			left = (STEP);				\
93 			kunmap_local(base);			\
94 			len -= left;				\
95 			__off += len;				\
96 			n -= len;				\
97 			if (left || n == 0)			\
98 				goto __out;			\
99 			offset += len;				\
100 			len = PAGE_SIZE;			\
101 		}						\
102 	}							\
103 __out:								\
104 	rcu_read_unlock();					\
105 	i->iov_offset += __off;					\
106 	n = __off;						\
107 }
108 
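/*
 * __iterate_and_advance() dispatches on the iterator type: I is the step
 * expression used for user-space addresses (ITER_IOVEC), K the one used
 * for kernel addresses (ITER_KVEC, ITER_BVEC, ITER_XARRAY).  The
 * iterate_and_advance() wrapper below discards the value of K, i.e. it
 * assumes that steps on kernel memory cannot fail; callers whose kernel
 * step can be short (e.g. _copy_mc_to_iter()) use __iterate_and_advance()
 * directly.
 */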
109 #define __iterate_and_advance(i, n, base, len, off, I, K) {	\
110 	if (unlikely(i->count < n))				\
111 		n = i->count;					\
112 	if (likely(n)) {					\
113 		if (likely(iter_is_iovec(i))) {			\
114 			const struct iovec *iov = i->iov;	\
115 			void __user *base;			\
116 			size_t len;				\
117 			iterate_iovec(i, n, base, len, off,	\
118 						iov, (I))	\
119 			i->nr_segs -= iov - i->iov;		\
120 			i->iov = iov;				\
121 		} else if (iov_iter_is_bvec(i)) {		\
122 			const struct bio_vec *bvec = i->bvec;	\
123 			void *base;				\
124 			size_t len;				\
125 			iterate_bvec(i, n, base, len, off,	\
126 						bvec, (K))	\
127 			i->nr_segs -= bvec - i->bvec;		\
128 			i->bvec = bvec;				\
129 		} else if (iov_iter_is_kvec(i)) {		\
130 			const struct kvec *kvec = i->kvec;	\
131 			void *base;				\
132 			size_t len;				\
133 			iterate_iovec(i, n, base, len, off,	\
134 						kvec, (K))	\
135 			i->nr_segs -= kvec - i->kvec;		\
136 			i->kvec = kvec;				\
137 		} else if (iov_iter_is_xarray(i)) {		\
138 			void *base;				\
139 			size_t len;				\
140 			iterate_xarray(i, n, base, len, off,	\
141 							(K))	\
142 		}						\
143 		i->count -= n;					\
144 	}							\
145 }
146 #define iterate_and_advance(i, n, base, len, off, I, K) \
147 	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
148 
149 static int copyout(void __user *to, const void *from, size_t n)
150 {
151 	if (should_fail_usercopy())
152 		return n;
153 	if (access_ok(to, n)) {
154 		instrument_copy_to_user(to, from, n);
155 		n = raw_copy_to_user(to, from, n);
156 	}
157 	return n;
158 }
159 
160 static int copyin(void *to, const void __user *from, size_t n)
161 {
162 	if (should_fail_usercopy())
163 		return n;
164 	if (access_ok(from, n)) {
165 		instrument_copy_from_user(to, from, n);
166 		n = raw_copy_from_user(to, from, n);
167 	}
168 	return n;
169 }
170 
171 #ifdef PIPE_PARANOIA
172 static bool sanity(const struct iov_iter *i)
173 {
174 	struct pipe_inode_info *pipe = i->pipe;
175 	unsigned int p_head = pipe->head;
176 	unsigned int p_tail = pipe->tail;
177 	unsigned int p_mask = pipe->ring_size - 1;
178 	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
179 	unsigned int i_head = i->head;
180 	unsigned int idx;
181 
182 	if (i->iov_offset) {
183 		struct pipe_buffer *p;
184 		if (unlikely(p_occupancy == 0))
185 			goto Bad;	// pipe must be non-empty
186 		if (unlikely(i_head != p_head - 1))
187 			goto Bad;	// must be at the last buffer...
188 
189 		p = &pipe->bufs[i_head & p_mask];
190 		if (unlikely(p->offset + p->len != i->iov_offset))
191 			goto Bad;	// ... at the end of segment
192 	} else {
193 		if (i_head != p_head)
194 			goto Bad;	// must be right after the last buffer
195 	}
196 	return true;
197 Bad:
198 	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
199 	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
200 			p_head, p_tail, pipe->ring_size);
201 	for (idx = 0; idx < pipe->ring_size; idx++)
202 		printk(KERN_ERR "[%p %p %d %d]\n",
203 			pipe->bufs[idx].ops,
204 			pipe->bufs[idx].page,
205 			pipe->bufs[idx].offset,
206 			pipe->bufs[idx].len);
207 	WARN_ON(1);
208 	return false;
209 }
210 #else
211 #define sanity(i) true
212 #endif
213 
214 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
215 			 struct iov_iter *i)
216 {
217 	struct pipe_inode_info *pipe = i->pipe;
218 	struct pipe_buffer *buf;
219 	unsigned int p_tail = pipe->tail;
220 	unsigned int p_mask = pipe->ring_size - 1;
221 	unsigned int i_head = i->head;
222 	size_t off;
223 
224 	if (unlikely(bytes > i->count))
225 		bytes = i->count;
226 
227 	if (unlikely(!bytes))
228 		return 0;
229 
230 	if (!sanity(i))
231 		return 0;
232 
233 	off = i->iov_offset;
234 	buf = &pipe->bufs[i_head & p_mask];
235 	if (off) {
236 		if (offset == off && buf->page == page) {
237 			/* merge with the last one */
238 			buf->len += bytes;
239 			i->iov_offset += bytes;
240 			goto out;
241 		}
242 		i_head++;
243 		buf = &pipe->bufs[i_head & p_mask];
244 	}
245 	if (pipe_full(i_head, p_tail, pipe->max_usage))
246 		return 0;
247 
248 	buf->ops = &page_cache_pipe_buf_ops;
249 	buf->flags = 0;
250 	get_page(page);
251 	buf->page = page;
252 	buf->offset = offset;
253 	buf->len = bytes;
254 
255 	pipe->head = i_head + 1;
256 	i->iov_offset = offset + bytes;
257 	i->head = i_head;
258 out:
259 	i->count -= bytes;
260 	return bytes;
261 }
262 
263 /*
264  * fault_in_iov_iter_readable - fault in iov iterator for reading
265  * @i: iterator
266  * @size: maximum length
267  *
268  * Fault in one or more iovecs of the given iov_iter, to a maximum length of
269  * @size.  For each iovec, fault in each page that constitutes the iovec.
270  *
271  * Returns the number of bytes not faulted in (like copy_to_user() and
272  * copy_from_user()).
273  *
274  * Always returns 0 for non-userspace iterators.
275  */
276 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
277 {
278 	if (iter_is_iovec(i)) {
279 		size_t count = min(size, iov_iter_count(i));
280 		const struct iovec *p;
281 		size_t skip;
282 
283 		size -= count;
284 		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
285 			size_t len = min(count, p->iov_len - skip);
286 			size_t ret;
287 
288 			if (unlikely(!len))
289 				continue;
290 			ret = fault_in_readable(p->iov_base + skip, len);
291 			count -= len - ret;
292 			if (ret)
293 				break;
294 		}
295 		return count + size;
296 	}
297 	return 0;
298 }
299 EXPORT_SYMBOL(fault_in_iov_iter_readable);
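/*
 * A minimal usage sketch (illustrative only; page, offset, bytes, status
 * and copied are the caller's locals), as in a buffered-write style loop
 * that pre-faults the source pages and then copies with page faults
 * disabled, giving up only if nothing at all could be faulted in:
 *
 *	if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 */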
300 
301 /*
302  * fault_in_iov_iter_writeable - fault in iov iterator for writing
303  * @i: iterator
304  * @size: maximum length
305  *
306  * Faults in the iterator using get_user_pages(), i.e., without triggering
307  * hardware page faults.  This is primarily useful when we already know that
308  * some or all of the pages in @i aren't in memory.
309  *
310  * Returns the number of bytes not faulted in, like copy_to_user() and
311  * copy_from_user().
312  *
313  * Always returns 0 for non-user-space iterators.
314  */
315 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
316 {
317 	if (iter_is_iovec(i)) {
318 		size_t count = min(size, iov_iter_count(i));
319 		const struct iovec *p;
320 		size_t skip;
321 
322 		size -= count;
323 		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
324 			size_t len = min(count, p->iov_len - skip);
325 			size_t ret;
326 
327 			if (unlikely(!len))
328 				continue;
329 			ret = fault_in_safe_writeable(p->iov_base + skip, len);
330 			count -= len - ret;
331 			if (ret)
332 				break;
333 		}
334 		return count + size;
335 	}
336 	return 0;
337 }
338 EXPORT_SYMBOL(fault_in_iov_iter_writeable);
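/*
 * A minimal usage sketch (illustrative only), as in a direct-I/O read path
 * that ran with i->nofault set and got a short transfer because the
 * destination pages were not resident; it faults them in by hand and
 * retries, giving up only if no progress at all could be made:
 *
 *	if (fault_in_iov_iter_writeable(i, size) != size)
 *		goto retry;
 *	return -EFAULT;
 */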
339 
340 void iov_iter_init(struct iov_iter *i, unsigned int direction,
341 			const struct iovec *iov, unsigned long nr_segs,
342 			size_t count)
343 {
344 	WARN_ON(direction & ~(READ | WRITE));
345 	*i = (struct iov_iter) {
346 		.iter_type = ITER_IOVEC,
347 		.nofault = false,
348 		.data_source = direction,
349 		.iov = iov,
350 		.nr_segs = nr_segs,
351 		.iov_offset = 0,
352 		.count = count
353 	};
354 }
355 EXPORT_SYMBOL(iov_iter_init);
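/*
 * A minimal usage sketch (illustrative only; ubuf, kbuf, len and copied are
 * the caller's locals): wrap a single user buffer in an ITER_IOVEC
 * destination and fill it from a kernel buffer.  READ means the iterator
 * is the destination of the transfer, as in read(2):
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 *
 * Most callers arrive here indirectly, via import_iovec() or
 * import_single_range() below.
 */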
356 
357 static inline bool allocated(struct pipe_buffer *buf)
358 {
359 	return buf->ops == &default_pipe_buf_ops;
360 }
361 
362 static inline void data_start(const struct iov_iter *i,
363 			      unsigned int *iter_headp, size_t *offp)
364 {
365 	unsigned int p_mask = i->pipe->ring_size - 1;
366 	unsigned int iter_head = i->head;
367 	size_t off = i->iov_offset;
368 
369 	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
370 		    off == PAGE_SIZE)) {
371 		iter_head++;
372 		off = 0;
373 	}
374 	*iter_headp = iter_head;
375 	*offp = off;
376 }
377 
378 static size_t push_pipe(struct iov_iter *i, size_t size,
379 			int *iter_headp, size_t *offp)
380 {
381 	struct pipe_inode_info *pipe = i->pipe;
382 	unsigned int p_tail = pipe->tail;
383 	unsigned int p_mask = pipe->ring_size - 1;
384 	unsigned int iter_head;
385 	size_t off;
386 	ssize_t left;
387 
388 	if (unlikely(size > i->count))
389 		size = i->count;
390 	if (unlikely(!size))
391 		return 0;
392 
393 	left = size;
394 	data_start(i, &iter_head, &off);
395 	*iter_headp = iter_head;
396 	*offp = off;
397 	if (off) {
398 		left -= PAGE_SIZE - off;
399 		if (left <= 0) {
400 			pipe->bufs[iter_head & p_mask].len += size;
401 			return size;
402 		}
403 		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
404 		iter_head++;
405 	}
406 	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
407 		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
408 		struct page *page = alloc_page(GFP_USER);
409 		if (!page)
410 			break;
411 
412 		buf->ops = &default_pipe_buf_ops;
413 		buf->flags = 0;
414 		buf->page = page;
415 		buf->offset = 0;
416 		buf->len = min_t(ssize_t, left, PAGE_SIZE);
417 		left -= buf->len;
418 		iter_head++;
419 		pipe->head = iter_head;
420 
421 		if (left == 0)
422 			return size;
423 	}
424 	return size - left;
425 }
426 
427 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
428 				struct iov_iter *i)
429 {
430 	struct pipe_inode_info *pipe = i->pipe;
431 	unsigned int p_mask = pipe->ring_size - 1;
432 	unsigned int i_head;
433 	size_t n, off;
434 
435 	if (!sanity(i))
436 		return 0;
437 
438 	bytes = n = push_pipe(i, bytes, &i_head, &off);
439 	if (unlikely(!n))
440 		return 0;
441 	do {
442 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
443 		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
444 		i->head = i_head;
445 		i->iov_offset = off + chunk;
446 		n -= chunk;
447 		addr += chunk;
448 		off = 0;
449 		i_head++;
450 	} while (n);
451 	i->count -= bytes;
452 	return bytes;
453 }
454 
455 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
456 			      __wsum sum, size_t off)
457 {
458 	__wsum next = csum_partial_copy_nocheck(from, to, len);
459 	return csum_block_add(sum, next, off);
460 }
461 
462 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
463 					 struct iov_iter *i, __wsum *sump)
464 {
465 	struct pipe_inode_info *pipe = i->pipe;
466 	unsigned int p_mask = pipe->ring_size - 1;
467 	__wsum sum = *sump;
468 	size_t off = 0;
469 	unsigned int i_head;
470 	size_t r;
471 
472 	if (!sanity(i))
473 		return 0;
474 
475 	bytes = push_pipe(i, bytes, &i_head, &r);
476 	while (bytes) {
477 		size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
478 		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
479 		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
480 		kunmap_local(p);
481 		i->head = i_head;
482 		i->iov_offset = r + chunk;
483 		bytes -= chunk;
484 		off += chunk;
485 		r = 0;
486 		i_head++;
487 	}
488 	*sump = sum;
489 	i->count -= off;
490 	return off;
491 }
492 
493 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
494 {
495 	if (unlikely(iov_iter_is_pipe(i)))
496 		return copy_pipe_to_iter(addr, bytes, i);
497 	if (iter_is_iovec(i))
498 		might_fault();
499 	iterate_and_advance(i, bytes, base, len, off,
500 		copyout(base, addr + off, len),
501 		memcpy(base, addr + off, len)
502 	)
503 
504 	return bytes;
505 }
506 EXPORT_SYMBOL(_copy_to_iter);
507 
508 #ifdef CONFIG_ARCH_HAS_COPY_MC
509 static int copyout_mc(void __user *to, const void *from, size_t n)
510 {
511 	if (access_ok(to, n)) {
512 		instrument_copy_to_user(to, from, n);
513 		n = copy_mc_to_user((__force void *) to, from, n);
514 	}
515 	return n;
516 }
517 
518 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
519 				struct iov_iter *i)
520 {
521 	struct pipe_inode_info *pipe = i->pipe;
522 	unsigned int p_mask = pipe->ring_size - 1;
523 	unsigned int i_head;
524 	size_t n, off, xfer = 0;
525 
526 	if (!sanity(i))
527 		return 0;
528 
529 	n = push_pipe(i, bytes, &i_head, &off);
530 	while (n) {
531 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
532 		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
533 		unsigned long rem;
534 		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
535 		chunk -= rem;
536 		kunmap_local(p);
537 		i->head = i_head;
538 		i->iov_offset = off + chunk;
539 		xfer += chunk;
540 		if (rem)
541 			break;
542 		n -= chunk;
543 		off = 0;
544 		i_head++;
545 	}
546 	i->count -= xfer;
547 	return xfer;
548 }
549 
550 /**
551  * _copy_mc_to_iter - copy to iter with source memory error exception handling
552  * @addr: source kernel address
553  * @bytes: total transfer length
554  * @i: destination iterator
555  *
556  * The pmem driver deploys this for the dax operation
557  * (dax_copy_to_iter()) for dax reads (bypassing the page cache and the
558  * block layer). Upon #MC, read(2) aborts and returns EIO or the number
559  * of bytes successfully copied.
560  *
561  * The main differences between this and typical _copy_to_iter() are:
562  *
563  * * Typical tail/residue handling after a fault retries the copy
564  *   byte-by-byte until the fault happens again. Re-triggering machine
565  *   checks is potentially fatal, so the implementation uses source
566  *   alignment and poison alignment assumptions to avoid re-triggering
567  *   hardware exceptions.
568  *
569  * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
570  *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
571  *   a short copy.
572  *
573  * Return: number of bytes copied (may be %0)
574  */
575 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
576 {
577 	if (unlikely(iov_iter_is_pipe(i)))
578 		return copy_mc_pipe_to_iter(addr, bytes, i);
579 	if (iter_is_iovec(i))
580 		might_fault();
581 	__iterate_and_advance(i, bytes, base, len, off,
582 		copyout_mc(base, addr + off, len),
583 		copy_mc_to_kernel(base, addr + off, len)
584 	)
585 
586 	return bytes;
587 }
588 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
589 #endif /* CONFIG_ARCH_HAS_COPY_MC */
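/*
 * A minimal usage sketch (illustrative only, modelled on a dax read; kaddr,
 * len and iter are the caller's locals): a short return from the
 * machine-check-aware copy is treated as an I/O error once nothing more
 * can be transferred:
 *
 *	size_t copied = _copy_mc_to_iter(kaddr, len, iter);
 *
 *	if (copied != len)
 *		return copied ? copied : -EIO;
 */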
590 
591 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
592 {
593 	if (unlikely(iov_iter_is_pipe(i))) {
594 		WARN_ON(1);
595 		return 0;
596 	}
597 	if (iter_is_iovec(i))
598 		might_fault();
599 	iterate_and_advance(i, bytes, base, len, off,
600 		copyin(addr + off, base, len),
601 		memcpy(addr + off, base, len)
602 	)
603 
604 	return bytes;
605 }
606 EXPORT_SYMBOL(_copy_from_iter);
607 
608 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
609 {
610 	if (unlikely(iov_iter_is_pipe(i))) {
611 		WARN_ON(1);
612 		return 0;
613 	}
614 	iterate_and_advance(i, bytes, base, len, off,
615 		__copy_from_user_inatomic_nocache(addr + off, base, len),
616 		memcpy(addr + off, base, len)
617 	)
618 
619 	return bytes;
620 }
621 EXPORT_SYMBOL(_copy_from_iter_nocache);
622 
623 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
624 /**
625  * _copy_from_iter_flushcache - write destination through cpu cache
626  * @addr: destination kernel address
627  * @bytes: total transfer length
628  * @i: source iterator
629  *
630  * The pmem driver arranges for filesystem-dax to use this facility via
631  * dax_copy_from_iter() for ensuring that writes to persistent memory
632  * are flushed through the CPU cache. It is differentiated from
633  * _copy_from_iter_nocache() in that it guarantees all data is flushed for
634  * all iterator types. The _copy_from_iter_nocache() only attempts to
635  * bypass the cache for the ITER_IOVEC case, and on some archs may use
636  * instructions that strand dirty-data in the cache.
637  *
638  * Return: number of bytes copied (may be %0)
639  */
640 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
641 {
642 	if (unlikely(iov_iter_is_pipe(i))) {
643 		WARN_ON(1);
644 		return 0;
645 	}
646 	iterate_and_advance(i, bytes, base, len, off,
647 		__copy_from_user_flushcache(addr + off, base, len),
648 		memcpy_flushcache(addr + off, base, len)
649 	)
650 
651 	return bytes;
652 }
653 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
654 #endif
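/*
 * A minimal usage sketch (illustrative only, modelled on a pmem-style
 * write path; pmem_addr, len, iter and copied are the caller's locals)
 * where the destination is persistent memory and dirty data must not be
 * left behind in the CPU cache:
 *
 *	copied = _copy_from_iter_flushcache(pmem_addr, len, iter);
 *	// ... followed by whatever persistence barrier the driver needs
 */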
655 
656 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
657 {
658 	struct page *head;
659 	size_t v = n + offset;
660 
661 	/*
662 	 * The general case needs to access the page order in order
663 	 * to compute the page size.
664 	 * However, we mostly deal with order-0 pages and thus can
665 	 * avoid a possible cache line miss for requests that fit all
666 	 * page orders.
667 	 */
668 	if (n <= v && v <= PAGE_SIZE)
669 		return true;
670 
671 	head = compound_head(page);
672 	v += (page - head) << PAGE_SHIFT;
673 
674 	if (likely(n <= v && v <= (page_size(head))))
675 		return true;
676 	WARN_ON(1);
677 	return false;
678 }
679 
680 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
681 			 struct iov_iter *i)
682 {
683 	if (unlikely(iov_iter_is_pipe(i))) {
684 		return copy_page_to_iter_pipe(page, offset, bytes, i);
685 	} else {
686 		void *kaddr = kmap_local_page(page);
687 		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
688 		kunmap_local(kaddr);
689 		return wanted;
690 	}
691 }
692 
693 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
694 			 struct iov_iter *i)
695 {
696 	size_t res = 0;
697 	if (unlikely(!page_copy_sane(page, offset, bytes)))
698 		return 0;
699 	page += offset / PAGE_SIZE; // first subpage
700 	offset %= PAGE_SIZE;
701 	while (1) {
702 		size_t n = __copy_page_to_iter(page, offset,
703 				min(bytes, (size_t)PAGE_SIZE - offset), i);
704 		res += n;
705 		bytes -= n;
706 		if (!bytes || !n)
707 			break;
708 		offset += n;
709 		if (offset == PAGE_SIZE) {
710 			page++;
711 			offset = 0;
712 		}
713 	}
714 	return res;
715 }
716 EXPORT_SYMBOL(copy_page_to_iter);
717 
718 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
719 			 struct iov_iter *i)
720 {
721 	if (page_copy_sane(page, offset, bytes)) {
722 		void *kaddr = kmap_local_page(page);
723 		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
724 		kunmap_local(kaddr);
725 		return wanted;
726 	}
727 	return 0;
728 }
729 EXPORT_SYMBOL(copy_page_from_iter);
730 
731 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
732 {
733 	struct pipe_inode_info *pipe = i->pipe;
734 	unsigned int p_mask = pipe->ring_size - 1;
735 	unsigned int i_head;
736 	size_t n, off;
737 
738 	if (!sanity(i))
739 		return 0;
740 
741 	bytes = n = push_pipe(i, bytes, &i_head, &off);
742 	if (unlikely(!n))
743 		return 0;
744 
745 	do {
746 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
747 		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
748 		memset(p + off, 0, chunk);
749 		kunmap_local(p);
750 		i->head = i_head;
751 		i->iov_offset = off + chunk;
752 		n -= chunk;
753 		off = 0;
754 		i_head++;
755 	} while (n);
756 	i->count -= bytes;
757 	return bytes;
758 }
759 
760 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
761 {
762 	if (unlikely(iov_iter_is_pipe(i)))
763 		return pipe_zero(bytes, i);
764 	iterate_and_advance(i, bytes, base, len, count,
765 		clear_user(base, len),
766 		memset(base, 0, len)
767 	)
768 
769 	return bytes;
770 }
771 EXPORT_SYMBOL(iov_iter_zero);
772 
773 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
774 				  struct iov_iter *i)
775 {
776 	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
777 	if (unlikely(!page_copy_sane(page, offset, bytes))) {
778 		kunmap_atomic(kaddr);
779 		return 0;
780 	}
781 	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
782 		kunmap_atomic(kaddr);
783 		WARN_ON(1);
784 		return 0;
785 	}
786 	iterate_and_advance(i, bytes, base, len, off,
787 		copyin(p + off, base, len),
788 		memcpy(p + off, base, len)
789 	)
790 	kunmap_atomic(kaddr);
791 	return bytes;
792 }
793 EXPORT_SYMBOL(copy_page_from_iter_atomic);
794 
795 static inline void pipe_truncate(struct iov_iter *i)
796 {
797 	struct pipe_inode_info *pipe = i->pipe;
798 	unsigned int p_tail = pipe->tail;
799 	unsigned int p_head = pipe->head;
800 	unsigned int p_mask = pipe->ring_size - 1;
801 
802 	if (!pipe_empty(p_head, p_tail)) {
803 		struct pipe_buffer *buf;
804 		unsigned int i_head = i->head;
805 		size_t off = i->iov_offset;
806 
807 		if (off) {
808 			buf = &pipe->bufs[i_head & p_mask];
809 			buf->len = off - buf->offset;
810 			i_head++;
811 		}
812 		while (p_head != i_head) {
813 			p_head--;
814 			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
815 		}
816 
817 		pipe->head = p_head;
818 	}
819 }
820 
821 static void pipe_advance(struct iov_iter *i, size_t size)
822 {
823 	struct pipe_inode_info *pipe = i->pipe;
824 	if (size) {
825 		struct pipe_buffer *buf;
826 		unsigned int p_mask = pipe->ring_size - 1;
827 		unsigned int i_head = i->head;
828 		size_t off = i->iov_offset, left = size;
829 
830 		if (off) /* make it relative to the beginning of buffer */
831 			left += off - pipe->bufs[i_head & p_mask].offset;
832 		while (1) {
833 			buf = &pipe->bufs[i_head & p_mask];
834 			if (left <= buf->len)
835 				break;
836 			left -= buf->len;
837 			i_head++;
838 		}
839 		i->head = i_head;
840 		i->iov_offset = buf->offset + left;
841 	}
842 	i->count -= size;
843 	/* ... and discard everything past that point */
844 	pipe_truncate(i);
845 }
846 
847 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
848 {
849 	const struct bio_vec *bvec, *end;
850 
851 	if (!i->count)
852 		return;
853 	i->count -= size;
854 
855 	size += i->iov_offset;
856 
857 	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
858 		if (likely(size < bvec->bv_len))
859 			break;
860 		size -= bvec->bv_len;
861 	}
862 	i->iov_offset = size;
863 	i->nr_segs -= bvec - i->bvec;
864 	i->bvec = bvec;
865 }
866 
867 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
868 {
869 	const struct iovec *iov, *end;
870 
871 	if (!i->count)
872 		return;
873 	i->count -= size;
874 
875 	size += i->iov_offset; // from beginning of current segment
876 	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
877 		if (likely(size < iov->iov_len))
878 			break;
879 		size -= iov->iov_len;
880 	}
881 	i->iov_offset = size;
882 	i->nr_segs -= iov - i->iov;
883 	i->iov = iov;
884 }
885 
886 void iov_iter_advance(struct iov_iter *i, size_t size)
887 {
888 	if (unlikely(i->count < size))
889 		size = i->count;
890 	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
891 		/* iovec and kvec have identical layouts */
892 		iov_iter_iovec_advance(i, size);
893 	} else if (iov_iter_is_bvec(i)) {
894 		iov_iter_bvec_advance(i, size);
895 	} else if (iov_iter_is_pipe(i)) {
896 		pipe_advance(i, size);
897 	} else if (unlikely(iov_iter_is_xarray(i))) {
898 		i->iov_offset += size;
899 		i->count -= size;
900 	} else if (iov_iter_is_discard(i)) {
901 		i->count -= size;
902 	}
903 }
904 EXPORT_SYMBOL(iov_iter_advance);
905 
906 void iov_iter_revert(struct iov_iter *i, size_t unroll)
907 {
908 	if (!unroll)
909 		return;
910 	if (WARN_ON(unroll > MAX_RW_COUNT))
911 		return;
912 	i->count += unroll;
913 	if (unlikely(iov_iter_is_pipe(i))) {
914 		struct pipe_inode_info *pipe = i->pipe;
915 		unsigned int p_mask = pipe->ring_size - 1;
916 		unsigned int i_head = i->head;
917 		size_t off = i->iov_offset;
918 		while (1) {
919 			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
920 			size_t n = off - b->offset;
921 			if (unroll < n) {
922 				off -= unroll;
923 				break;
924 			}
925 			unroll -= n;
926 			if (!unroll && i_head == i->start_head) {
927 				off = 0;
928 				break;
929 			}
930 			i_head--;
931 			b = &pipe->bufs[i_head & p_mask];
932 			off = b->offset + b->len;
933 		}
934 		i->iov_offset = off;
935 		i->head = i_head;
936 		pipe_truncate(i);
937 		return;
938 	}
939 	if (unlikely(iov_iter_is_discard(i)))
940 		return;
941 	if (unroll <= i->iov_offset) {
942 		i->iov_offset -= unroll;
943 		return;
944 	}
945 	unroll -= i->iov_offset;
946 	if (iov_iter_is_xarray(i)) {
947 		BUG(); /* We should never go beyond the start of the specified
948 			* range since we might then be straying into pages that
949 			* aren't pinned.
950 			*/
951 	} else if (iov_iter_is_bvec(i)) {
952 		const struct bio_vec *bvec = i->bvec;
953 		while (1) {
954 			size_t n = (--bvec)->bv_len;
955 			i->nr_segs++;
956 			if (unroll <= n) {
957 				i->bvec = bvec;
958 				i->iov_offset = n - unroll;
959 				return;
960 			}
961 			unroll -= n;
962 		}
963 	} else { /* same logic for iovec and kvec */
964 		const struct iovec *iov = i->iov;
965 		while (1) {
966 			size_t n = (--iov)->iov_len;
967 			i->nr_segs++;
968 			if (unroll <= n) {
969 				i->iov = iov;
970 				i->iov_offset = n - unroll;
971 				return;
972 			}
973 			unroll -= n;
974 		}
975 	}
976 }
977 EXPORT_SYMBOL(iov_iter_revert);
978 
979 /*
980  * Return the count of just the current iov_iter segment.
981  */
982 size_t iov_iter_single_seg_count(const struct iov_iter *i)
983 {
984 	if (i->nr_segs > 1) {
985 		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
986 			return min(i->count, i->iov->iov_len - i->iov_offset);
987 		if (iov_iter_is_bvec(i))
988 			return min(i->count, i->bvec->bv_len - i->iov_offset);
989 	}
990 	return i->count;
991 }
992 EXPORT_SYMBOL(iov_iter_single_seg_count);
993 
994 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
995 			const struct kvec *kvec, unsigned long nr_segs,
996 			size_t count)
997 {
998 	WARN_ON(direction & ~(READ | WRITE));
999 	*i = (struct iov_iter){
1000 		.iter_type = ITER_KVEC,
1001 		.data_source = direction,
1002 		.kvec = kvec,
1003 		.nr_segs = nr_segs,
1004 		.iov_offset = 0,
1005 		.count = count
1006 	};
1007 }
1008 EXPORT_SYMBOL(iov_iter_kvec);
1009 
1010 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1011 			const struct bio_vec *bvec, unsigned long nr_segs,
1012 			size_t count)
1013 {
1014 	WARN_ON(direction & ~(READ | WRITE));
1015 	*i = (struct iov_iter){
1016 		.iter_type = ITER_BVEC,
1017 		.data_source = direction,
1018 		.bvec = bvec,
1019 		.nr_segs = nr_segs,
1020 		.iov_offset = 0,
1021 		.count = count
1022 	};
1023 }
1024 EXPORT_SYMBOL(iov_iter_bvec);
1025 
1026 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1027 			struct pipe_inode_info *pipe,
1028 			size_t count)
1029 {
1030 	BUG_ON(direction != READ);
1031 	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1032 	*i = (struct iov_iter){
1033 		.iter_type = ITER_PIPE,
1034 		.data_source = false,
1035 		.pipe = pipe,
1036 		.head = pipe->head,
1037 		.start_head = pipe->head,
1038 		.iov_offset = 0,
1039 		.count = count
1040 	};
1041 }
1042 EXPORT_SYMBOL(iov_iter_pipe);
1043 
1044 /**
1045  * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1046  * @i: The iterator to initialise.
1047  * @direction: The direction of the transfer.
1048  * @xarray: The xarray to access.
1049  * @start: The start file position.
1050  * @count: The size of the I/O buffer in bytes.
1051  *
1052  * Set up an I/O iterator to either draw data out of the pages attached to an
1053  * inode or to inject data into those pages.  The pages *must* be prevented
1054  * from evaporating, either by the caller taking a ref on them or by
1055  * locking them.
1056  */
1057 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1058 		     struct xarray *xarray, loff_t start, size_t count)
1059 {
1060 	BUG_ON(direction & ~1);
1061 	*i = (struct iov_iter) {
1062 		.iter_type = ITER_XARRAY,
1063 		.data_source = direction,
1064 		.xarray = xarray,
1065 		.xarray_start = start,
1066 		.count = count,
1067 		.iov_offset = 0
1068 	};
1069 }
1070 EXPORT_SYMBOL(iov_iter_xarray);
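/*
 * A minimal usage sketch (illustrative only): letting a network filesystem
 * receive data directly into already pinned/locked pagecache pages of an
 * inode, where mapping is the inode's address_space and the caller holds
 * the pages covering [pos, pos + count):
 *
 *	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
 *	// then hand &iter to the transport's receive routine
 */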
1071 
1072 /**
1073  * iov_iter_discard - Initialise an I/O iterator that discards data
1074  * @i: The iterator to initialise.
1075  * @direction: The direction of the transfer.
1076  * @count: The size of the I/O buffer in bytes.
1077  *
1078  * Set up an I/O iterator that just discards everything that's written to it.
1079  * It's only available as a READ iterator.
1080  */
1081 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1082 {
1083 	BUG_ON(direction != READ);
1084 	*i = (struct iov_iter){
1085 		.iter_type = ITER_DISCARD,
1086 		.data_source = false,
1087 		.count = count,
1088 		.iov_offset = 0
1089 	};
1090 }
1091 EXPORT_SYMBOL(iov_iter_discard);
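/*
 * A minimal usage sketch (illustrative only; src, bytes_to_skip and copied
 * are the caller's locals): consuming and throwing away a stretch of data
 * from a producer that only knows how to copy into an iterator.  The copy
 * helpers above just advance the count for ITER_DISCARD, so nothing is
 * written anywhere:
 *
 *	iov_iter_discard(&iter, READ, bytes_to_skip);
 *	copied = copy_to_iter(src, bytes_to_skip, &iter);
 */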
1092 
1093 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1094 {
1095 	unsigned long res = 0;
1096 	size_t size = i->count;
1097 	size_t skip = i->iov_offset;
1098 	unsigned k;
1099 
1100 	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1101 		size_t len = i->iov[k].iov_len - skip;
1102 		if (len) {
1103 			res |= (unsigned long)i->iov[k].iov_base + skip;
1104 			if (len > size)
1105 				len = size;
1106 			res |= len;
1107 			size -= len;
1108 			if (!size)
1109 				break;
1110 		}
1111 	}
1112 	return res;
1113 }
1114 
1115 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1116 {
1117 	unsigned res = 0;
1118 	size_t size = i->count;
1119 	unsigned skip = i->iov_offset;
1120 	unsigned k;
1121 
1122 	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1123 		size_t len = i->bvec[k].bv_len - skip;
1124 		res |= (unsigned long)i->bvec[k].bv_offset + skip;
1125 		if (len > size)
1126 			len = size;
1127 		res |= len;
1128 		size -= len;
1129 		if (!size)
1130 			break;
1131 	}
1132 	return res;
1133 }
1134 
1135 unsigned long iov_iter_alignment(const struct iov_iter *i)
1136 {
1137 	/* iovec and kvec have identical layouts */
1138 	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1139 		return iov_iter_alignment_iovec(i);
1140 
1141 	if (iov_iter_is_bvec(i))
1142 		return iov_iter_alignment_bvec(i);
1143 
1144 	if (iov_iter_is_pipe(i)) {
1145 		unsigned int p_mask = i->pipe->ring_size - 1;
1146 		size_t size = i->count;
1147 
1148 		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1149 			return size | i->iov_offset;
1150 		return size;
1151 	}
1152 
1153 	if (iov_iter_is_xarray(i))
1154 		return (i->xarray_start + i->iov_offset) | i->count;
1155 
1156 	return 0;
1157 }
1158 EXPORT_SYMBOL(iov_iter_alignment);
1159 
1160 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1161 {
1162 	unsigned long res = 0;
1163 	unsigned long v = 0;
1164 	size_t size = i->count;
1165 	unsigned k;
1166 
1167 	if (WARN_ON(!iter_is_iovec(i)))
1168 		return ~0U;
1169 
1170 	for (k = 0; k < i->nr_segs; k++) {
1171 		if (i->iov[k].iov_len) {
1172 			unsigned long base = (unsigned long)i->iov[k].iov_base;
1173 			if (v) // if not the first one
1174 				res |= base | v; // this start | previous end
1175 			v = base + i->iov[k].iov_len;
1176 			if (size <= i->iov[k].iov_len)
1177 				break;
1178 			size -= i->iov[k].iov_len;
1179 		}
1180 	}
1181 	return res;
1182 }
1183 EXPORT_SYMBOL(iov_iter_gap_alignment);
1184 
1185 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1186 				size_t maxsize,
1187 				struct page **pages,
1188 				int iter_head,
1189 				size_t *start)
1190 {
1191 	struct pipe_inode_info *pipe = i->pipe;
1192 	unsigned int p_mask = pipe->ring_size - 1;
1193 	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1194 	if (!n)
1195 		return -EFAULT;
1196 
1197 	maxsize = n;
1198 	n += *start;
1199 	while (n > 0) {
1200 		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1201 		iter_head++;
1202 		n -= PAGE_SIZE;
1203 	}
1204 
1205 	return maxsize;
1206 }
1207 
1208 static ssize_t pipe_get_pages(struct iov_iter *i,
1209 		   struct page **pages, size_t maxsize, unsigned maxpages,
1210 		   size_t *start)
1211 {
1212 	unsigned int iter_head, npages;
1213 	size_t capacity;
1214 
1215 	if (!sanity(i))
1216 		return -EFAULT;
1217 
1218 	data_start(i, &iter_head, start);
1219 	/* Amount of free space: some of this one + all after this one */
1220 	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1221 	capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1222 
1223 	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1224 }
1225 
1226 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1227 					  pgoff_t index, unsigned int nr_pages)
1228 {
1229 	XA_STATE(xas, xa, index);
1230 	struct page *page;
1231 	unsigned int ret = 0;
1232 
1233 	rcu_read_lock();
1234 	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1235 		if (xas_retry(&xas, page))
1236 			continue;
1237 
1238 		/* Has the page moved or been split? */
1239 		if (unlikely(page != xas_reload(&xas))) {
1240 			xas_reset(&xas);
1241 			continue;
1242 		}
1243 
1244 		pages[ret] = find_subpage(page, xas.xa_index);
1245 		get_page(pages[ret]);
1246 		if (++ret == nr_pages)
1247 			break;
1248 	}
1249 	rcu_read_unlock();
1250 	return ret;
1251 }
1252 
1253 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1254 				     struct page **pages, size_t maxsize,
1255 				     unsigned maxpages, size_t *_start_offset)
1256 {
1257 	unsigned nr, offset;
1258 	pgoff_t index, count;
1259 	size_t size = maxsize, actual;
1260 	loff_t pos;
1261 
1262 	if (!size || !maxpages)
1263 		return 0;
1264 
1265 	pos = i->xarray_start + i->iov_offset;
1266 	index = pos >> PAGE_SHIFT;
1267 	offset = pos & ~PAGE_MASK;
1268 	*_start_offset = offset;
1269 
1270 	count = 1;
1271 	if (size > PAGE_SIZE - offset) {
1272 		size -= PAGE_SIZE - offset;
1273 		count += size >> PAGE_SHIFT;
1274 		size &= ~PAGE_MASK;
1275 		if (size)
1276 			count++;
1277 	}
1278 
1279 	if (count > maxpages)
1280 		count = maxpages;
1281 
1282 	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1283 	if (nr == 0)
1284 		return 0;
1285 
1286 	actual = PAGE_SIZE * nr;
1287 	actual -= offset;
1288 	if (nr == count && size > 0) {
1289 		unsigned last_offset = (nr > 1) ? 0 : offset;
1290 		actual -= PAGE_SIZE - (last_offset + size);
1291 	}
1292 	return actual;
1293 }
1294 
1295 /* must only be called on a non-empty ITER_IOVEC iterator */
1296 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1297 {
1298 	size_t skip;
1299 	long k;
1300 
1301 	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1302 		size_t len = i->iov[k].iov_len - skip;
1303 
1304 		if (unlikely(!len))
1305 			continue;
1306 		if (*size > len)
1307 			*size = len;
1308 		return (unsigned long)i->iov[k].iov_base + skip;
1309 	}
1310 	BUG(); // if it had been empty, we wouldn't get called
1311 }
1312 
1313 /* must only be called on a non-empty ITER_BVEC iterator */
1314 static struct page *first_bvec_segment(const struct iov_iter *i,
1315 				       size_t *size, size_t *start)
1316 {
1317 	struct page *page;
1318 	size_t skip = i->iov_offset, len;
1319 
1320 	len = i->bvec->bv_len - skip;
1321 	if (*size > len)
1322 		*size = len;
1323 	skip += i->bvec->bv_offset;
1324 	page = i->bvec->bv_page + skip / PAGE_SIZE;
1325 	*start = skip % PAGE_SIZE;
1326 	return page;
1327 }
1328 
1329 ssize_t iov_iter_get_pages(struct iov_iter *i,
1330 		   struct page **pages, size_t maxsize, unsigned maxpages,
1331 		   size_t *start)
1332 {
1333 	int n, res;
1334 
1335 	if (maxsize > i->count)
1336 		maxsize = i->count;
1337 	if (!maxsize)
1338 		return 0;
1339 	if (maxsize > MAX_RW_COUNT)
1340 		maxsize = MAX_RW_COUNT;
1341 
1342 	if (likely(iter_is_iovec(i))) {
1343 		unsigned int gup_flags = 0;
1344 		unsigned long addr;
1345 
1346 		if (iov_iter_rw(i) != WRITE)
1347 			gup_flags |= FOLL_WRITE;
1348 		if (i->nofault)
1349 			gup_flags |= FOLL_NOFAULT;
1350 
1351 		addr = first_iovec_segment(i, &maxsize);
1352 		*start = addr % PAGE_SIZE;
1353 		addr &= PAGE_MASK;
1354 		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1355 		if (n > maxpages)
1356 			n = maxpages;
1357 		res = get_user_pages_fast(addr, n, gup_flags, pages);
1358 		if (unlikely(res <= 0))
1359 			return res;
1360 		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1361 	}
1362 	if (iov_iter_is_bvec(i)) {
1363 		struct page *page;
1364 
1365 		page = first_bvec_segment(i, &maxsize, start);
1366 		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1367 		if (n > maxpages)
1368 			n = maxpages;
1369 		for (int k = 0; k < n; k++)
1370 			get_page(*pages++ = page++);
1371 		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1372 	}
1373 	if (iov_iter_is_pipe(i))
1374 		return pipe_get_pages(i, pages, maxsize, maxpages, start);
1375 	if (iov_iter_is_xarray(i))
1376 		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1377 	return -EFAULT;
1378 }
1379 EXPORT_SYMBOL(iov_iter_get_pages);
1380 
1381 static struct page **get_pages_array(size_t n)
1382 {
1383 	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1384 }
1385 
1386 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1387 		   struct page ***pages, size_t maxsize,
1388 		   size_t *start)
1389 {
1390 	struct page **p;
1391 	unsigned int iter_head, npages;
1392 	ssize_t n;
1393 
1394 	if (!sanity(i))
1395 		return -EFAULT;
1396 
1397 	data_start(i, &iter_head, start);
1398 	/* Amount of free space: some of this one + all after this one */
1399 	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1400 	n = npages * PAGE_SIZE - *start;
1401 	if (maxsize > n)
1402 		maxsize = n;
1403 	else
1404 		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1405 	p = get_pages_array(npages);
1406 	if (!p)
1407 		return -ENOMEM;
1408 	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1409 	if (n > 0)
1410 		*pages = p;
1411 	else
1412 		kvfree(p);
1413 	return n;
1414 }
1415 
1416 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1417 					   struct page ***pages, size_t maxsize,
1418 					   size_t *_start_offset)
1419 {
1420 	struct page **p;
1421 	unsigned nr, offset;
1422 	pgoff_t index, count;
1423 	size_t size = maxsize, actual;
1424 	loff_t pos;
1425 
1426 	if (!size)
1427 		return 0;
1428 
1429 	pos = i->xarray_start + i->iov_offset;
1430 	index = pos >> PAGE_SHIFT;
1431 	offset = pos & ~PAGE_MASK;
1432 	*_start_offset = offset;
1433 
1434 	count = 1;
1435 	if (size > PAGE_SIZE - offset) {
1436 		size -= PAGE_SIZE - offset;
1437 		count += size >> PAGE_SHIFT;
1438 		size &= ~PAGE_MASK;
1439 		if (size)
1440 			count++;
1441 	}
1442 
1443 	p = get_pages_array(count);
1444 	if (!p)
1445 		return -ENOMEM;
1446 	*pages = p;
1447 
1448 	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1449 	if (nr == 0)
1450 		return 0;
1451 
1452 	actual = PAGE_SIZE * nr;
1453 	actual -= offset;
1454 	if (nr == count && size > 0) {
1455 		unsigned last_offset = (nr > 1) ? 0 : offset;
1456 		actual -= PAGE_SIZE - (last_offset + size);
1457 	}
1458 	return actual;
1459 }
1460 
1461 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1462 		   struct page ***pages, size_t maxsize,
1463 		   size_t *start)
1464 {
1465 	struct page **p;
1466 	int n, res;
1467 
1468 	if (maxsize > i->count)
1469 		maxsize = i->count;
1470 	if (!maxsize)
1471 		return 0;
1472 	if (maxsize > MAX_RW_COUNT)
1473 		maxsize = MAX_RW_COUNT;
1474 
1475 	if (likely(iter_is_iovec(i))) {
1476 		unsigned int gup_flags = 0;
1477 		unsigned long addr;
1478 
1479 		if (iov_iter_rw(i) != WRITE)
1480 			gup_flags |= FOLL_WRITE;
1481 		if (i->nofault)
1482 			gup_flags |= FOLL_NOFAULT;
1483 
1484 		addr = first_iovec_segment(i, &maxsize);
1485 		*start = addr % PAGE_SIZE;
1486 		addr &= PAGE_MASK;
1487 		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1488 		p = get_pages_array(n);
1489 		if (!p)
1490 			return -ENOMEM;
1491 		res = get_user_pages_fast(addr, n, gup_flags, p);
1492 		if (unlikely(res <= 0)) {
1493 			kvfree(p);
1494 			*pages = NULL;
1495 			return res;
1496 		}
1497 		*pages = p;
1498 		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1499 	}
1500 	if (iov_iter_is_bvec(i)) {
1501 		struct page *page;
1502 
1503 		page = first_bvec_segment(i, &maxsize, start);
1504 		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1505 		*pages = p = get_pages_array(n);
1506 		if (!p)
1507 			return -ENOMEM;
1508 		for (int k = 0; k < n; k++)
1509 			get_page(*p++ = page++);
1510 		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1511 	}
1512 	if (iov_iter_is_pipe(i))
1513 		return pipe_get_pages_alloc(i, pages, maxsize, start);
1514 	if (iov_iter_is_xarray(i))
1515 		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1516 	return -EFAULT;
1517 }
1518 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1519 
1520 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1521 			       struct iov_iter *i)
1522 {
1523 	__wsum sum, next;
1524 	sum = *csum;
1525 	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1526 		WARN_ON(1);
1527 		return 0;
1528 	}
1529 	iterate_and_advance(i, bytes, base, len, off, ({
1530 		next = csum_and_copy_from_user(base, addr + off, len);
1531 		sum = csum_block_add(sum, next, off);
1532 		next ? 0 : len;
1533 	}), ({
1534 		sum = csum_and_memcpy(addr + off, base, len, sum, off);
1535 	})
1536 	)
1537 	*csum = sum;
1538 	return bytes;
1539 }
1540 EXPORT_SYMBOL(csum_and_copy_from_iter);
1541 
1542 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1543 			     struct iov_iter *i)
1544 {
1545 	struct csum_state *csstate = _csstate;
1546 	__wsum sum, next;
1547 
1548 	if (unlikely(iov_iter_is_discard(i))) {
1549 		WARN_ON(1);	/* for now */
1550 		return 0;
1551 	}
1552 
1553 	sum = csum_shift(csstate->csum, csstate->off);
1554 	if (unlikely(iov_iter_is_pipe(i)))
1555 		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1556 	else iterate_and_advance(i, bytes, base, len, off, ({
1557 		next = csum_and_copy_to_user(addr + off, base, len);
1558 		sum = csum_block_add(sum, next, off);
1559 		next ? 0 : len;
1560 	}), ({
1561 		sum = csum_and_memcpy(base, addr + off, len, sum, off);
1562 	})
1563 	)
1564 	csstate->csum = csum_shift(sum, csstate->off);
1565 	csstate->off += bytes;
1566 	return bytes;
1567 }
1568 EXPORT_SYMBOL(csum_and_copy_to_iter);
1569 
1570 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1571 		struct iov_iter *i)
1572 {
1573 #ifdef CONFIG_CRYPTO_HASH
1574 	struct ahash_request *hash = hashp;
1575 	struct scatterlist sg;
1576 	size_t copied;
1577 
1578 	copied = copy_to_iter(addr, bytes, i);
1579 	sg_init_one(&sg, addr, copied);
1580 	ahash_request_set_crypt(hash, &sg, NULL, copied);
1581 	crypto_ahash_update(hash);
1582 	return copied;
1583 #else
1584 	return 0;
1585 #endif
1586 }
1587 EXPORT_SYMBOL(hash_and_copy_to_iter);
1588 
1589 static int iov_npages(const struct iov_iter *i, int maxpages)
1590 {
1591 	size_t skip = i->iov_offset, size = i->count;
1592 	const struct iovec *p;
1593 	int npages = 0;
1594 
1595 	for (p = i->iov; size; skip = 0, p++) {
1596 		unsigned offs = offset_in_page(p->iov_base + skip);
1597 		size_t len = min(p->iov_len - skip, size);
1598 
1599 		if (len) {
1600 			size -= len;
1601 			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1602 			if (unlikely(npages > maxpages))
1603 				return maxpages;
1604 		}
1605 	}
1606 	return npages;
1607 }
1608 
1609 static int bvec_npages(const struct iov_iter *i, int maxpages)
1610 {
1611 	size_t skip = i->iov_offset, size = i->count;
1612 	const struct bio_vec *p;
1613 	int npages = 0;
1614 
1615 	for (p = i->bvec; size; skip = 0, p++) {
1616 		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1617 		size_t len = min(p->bv_len - skip, size);
1618 
1619 		size -= len;
1620 		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1621 		if (unlikely(npages > maxpages))
1622 			return maxpages;
1623 	}
1624 	return npages;
1625 }
1626 
1627 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1628 {
1629 	if (unlikely(!i->count))
1630 		return 0;
1631 	/* iovec and kvec have identical layouts */
1632 	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1633 		return iov_npages(i, maxpages);
1634 	if (iov_iter_is_bvec(i))
1635 		return bvec_npages(i, maxpages);
1636 	if (iov_iter_is_pipe(i)) {
1637 		unsigned int iter_head;
1638 		int npages;
1639 		size_t off;
1640 
1641 		if (!sanity(i))
1642 			return 0;
1643 
1644 		data_start(i, &iter_head, &off);
1645 		/* some of this one + all after this one */
1646 		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1647 		return min(npages, maxpages);
1648 	}
1649 	if (iov_iter_is_xarray(i)) {
1650 		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1651 		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1652 		return min(npages, maxpages);
1653 	}
1654 	return 0;
1655 }
1656 EXPORT_SYMBOL(iov_iter_npages);
1657 
1658 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1659 {
1660 	*new = *old;
1661 	if (unlikely(iov_iter_is_pipe(new))) {
1662 		WARN_ON(1);
1663 		return NULL;
1664 	}
1665 	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1666 		return NULL;
1667 	if (iov_iter_is_bvec(new))
1668 		return new->bvec = kmemdup(new->bvec,
1669 				    new->nr_segs * sizeof(struct bio_vec),
1670 				    flags);
1671 	else
1672 		/* iovec and kvec have identical layout */
1673 		return new->iov = kmemdup(new->iov,
1674 				   new->nr_segs * sizeof(struct iovec),
1675 				   flags);
1676 }
1677 EXPORT_SYMBOL(dup_iter);
1678 
1679 static int copy_compat_iovec_from_user(struct iovec *iov,
1680 		const struct iovec __user *uvec, unsigned long nr_segs)
1681 {
1682 	const struct compat_iovec __user *uiov =
1683 		(const struct compat_iovec __user *)uvec;
1684 	int ret = -EFAULT, i;
1685 
1686 	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1687 		return -EFAULT;
1688 
1689 	for (i = 0; i < nr_segs; i++) {
1690 		compat_uptr_t buf;
1691 		compat_ssize_t len;
1692 
1693 		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1694 		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1695 
1696 		/* check for compat_size_t not fitting in compat_ssize_t .. */
1697 		if (len < 0) {
1698 			ret = -EINVAL;
1699 			goto uaccess_end;
1700 		}
1701 		iov[i].iov_base = compat_ptr(buf);
1702 		iov[i].iov_len = len;
1703 	}
1704 
1705 	ret = 0;
1706 uaccess_end:
1707 	user_access_end();
1708 	return ret;
1709 }
1710 
1711 static int copy_iovec_from_user(struct iovec *iov,
1712 		const struct iovec __user *uvec, unsigned long nr_segs)
1713 {
1714 	unsigned long seg;
1715 
1716 	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1717 		return -EFAULT;
1718 	for (seg = 0; seg < nr_segs; seg++) {
1719 		if ((ssize_t)iov[seg].iov_len < 0)
1720 			return -EINVAL;
1721 	}
1722 
1723 	return 0;
1724 }
1725 
1726 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1727 		unsigned long nr_segs, unsigned long fast_segs,
1728 		struct iovec *fast_iov, bool compat)
1729 {
1730 	struct iovec *iov = fast_iov;
1731 	int ret;
1732 
1733 	/*
1734 	 * SuS says "The readv() function *may* fail if the iovcnt argument was
1735 	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
1736 	 * traditionally returned zero for zero segments, so...
1737 	 */
1738 	if (nr_segs == 0)
1739 		return iov;
1740 	if (nr_segs > UIO_MAXIOV)
1741 		return ERR_PTR(-EINVAL);
1742 	if (nr_segs > fast_segs) {
1743 		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1744 		if (!iov)
1745 			return ERR_PTR(-ENOMEM);
1746 	}
1747 
1748 	if (compat)
1749 		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1750 	else
1751 		ret = copy_iovec_from_user(iov, uvec, nr_segs);
1752 	if (ret) {
1753 		if (iov != fast_iov)
1754 			kfree(iov);
1755 		return ERR_PTR(ret);
1756 	}
1757 
1758 	return iov;
1759 }
1760 
1761 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1762 		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1763 		 struct iov_iter *i, bool compat)
1764 {
1765 	ssize_t total_len = 0;
1766 	unsigned long seg;
1767 	struct iovec *iov;
1768 
1769 	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1770 	if (IS_ERR(iov)) {
1771 		*iovp = NULL;
1772 		return PTR_ERR(iov);
1773 	}
1774 
1775 	/*
1776 	 * According to the Single Unix Specification we should return EINVAL if
1777 	 * an element length is < 0 when cast to ssize_t or if the total length
1778 	 * would overflow the ssize_t return value of the system call.
1779 	 *
1780 	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1781 	 * overflow case.
1782 	 */
1783 	for (seg = 0; seg < nr_segs; seg++) {
1784 		ssize_t len = (ssize_t)iov[seg].iov_len;
1785 
1786 		if (!access_ok(iov[seg].iov_base, len)) {
1787 			if (iov != *iovp)
1788 				kfree(iov);
1789 			*iovp = NULL;
1790 			return -EFAULT;
1791 		}
1792 
1793 		if (len > MAX_RW_COUNT - total_len) {
1794 			len = MAX_RW_COUNT - total_len;
1795 			iov[seg].iov_len = len;
1796 		}
1797 		total_len += len;
1798 	}
1799 
1800 	iov_iter_init(i, type, iov, nr_segs, total_len);
1801 	if (iov == *iovp)
1802 		*iovp = NULL;
1803 	else
1804 		*iovp = iov;
1805 	return total_len;
1806 }
1807 
1808 /**
1809  * import_iovec() - Copy an array of &struct iovec from userspace
1810  *     into the kernel, check that it is valid, and initialize a new
1811  *     &struct iov_iter iterator to access it.
1812  *
1813  * @type: One of %READ or %WRITE.
1814  * @uvec: Pointer to the userspace array.
1815  * @nr_segs: Number of elements in userspace array.
1816  * @fast_segs: Number of elements in *@iovp.
1817  * @iovp: (input and output parameter) Pointer to pointer to (usually small
1818  *     on-stack) kernel array.
1819  * @i: Pointer to iterator that will be initialized on success.
1820  *
1821  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1822  * then this function places %NULL in *@iovp on return. Otherwise, a new
1823  * array will be allocated and the result placed in *@iovp. This means that
1824  * the caller may call kfree() on *@iovp regardless of whether the small
1825  * on-stack array was used or not (and regardless of whether this function
1826  * returns an error or not).
1827  *
1828  * Return: Negative error code on error, bytes imported on success
1829  */
1830 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1831 		 unsigned nr_segs, unsigned fast_segs,
1832 		 struct iovec **iovp, struct iov_iter *i)
1833 {
1834 	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1835 			      in_compat_syscall());
1836 }
1837 EXPORT_SYMBOL(import_iovec);
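/*
 * A minimal usage sketch (illustrative only), following the usual
 * readv()-style calling convention; do_it() stands in for whatever
 * consumes the iterator:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_it(&iter);
 *	kfree(iov);	// safe whether or not iovstack was used
 */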
1838 
1839 int import_single_range(int rw, void __user *buf, size_t len,
1840 		 struct iovec *iov, struct iov_iter *i)
1841 {
1842 	if (len > MAX_RW_COUNT)
1843 		len = MAX_RW_COUNT;
1844 	if (unlikely(!access_ok(buf, len)))
1845 		return -EFAULT;
1846 
1847 	iov->iov_base = buf;
1848 	iov->iov_len = len;
1849 	iov_iter_init(i, rw, iov, 1, len);
1850 	return 0;
1851 }
1852 EXPORT_SYMBOL(import_single_range);
1853 
1854 /**
1855  * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1856  *     iov_iter_save_state() was called.
1857  *
1858  * @i: &struct iov_iter to restore
1859  * @state: state to restore from
1860  *
1861  * Used after iov_iter_save_state() to restore @i, if operations may
1862  * have advanced it.
1863  *
1864  * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
1865  */
1866 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
1867 {
1868 	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
1869 			 !iov_iter_is_kvec(i)))
1870 		return;
1871 	i->iov_offset = state->iov_offset;
1872 	i->count = state->count;
1873 	/*
1874 	 * For the *vec iters, nr_segs + iov is constant - if we increment
1875 	 * the vec, then we also decrement the nr_segs count. Hence we don't
1876 	 * need to track both of these, just one is enough and we can derive
1877 	 * the other from it. ITER_KVEC and ITER_IOVEC are the same struct
1878 	 * size, so we can just increment the iov pointer as they are unionized.
1879 	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
1880 	 * not. Be safe and handle it separately.
1881 	 */
1882 	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
1883 	if (iov_iter_is_bvec(i))
1884 		i->bvec -= state->nr_segs - i->nr_segs;
1885 	else
1886 		i->iov -= state->nr_segs - i->nr_segs;
1887 	i->nr_segs = state->nr_segs;
1888 }
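/*
 * A minimal usage sketch (illustrative only): pairing with
 * iov_iter_save_state() so that a transfer which advanced the iterator
 * before failing can be retried from the original position; do_transfer()
 * stands in for the operation that advances @i:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(i, &state);
 *	ret = do_transfer(i);
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(i, &state);
 *		// resubmit, e.g. via a blocking path
 *	}
 */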
1889