/* lib/iov_iter.c: search results, lines matching "page" and "size" */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/fault-inject-usercopy.h>
        base = __p + i->iov_offset; \
        len -= (STEP); \
        i->iov_offset += len; \
        size_t skip = i->iov_offset; \
        len = min(n, __p->iov_len - skip); \
        base = __p->iov_base + skip; \
        len -= (STEP); \
        n -= len; \
        if (skip < __p->iov_len) \
        i->iov_offset = skip; \
        unsigned skip = i->iov_offset; \
        unsigned offset = p->bv_offset + skip; \
        void *kaddr = kmap_local_page(p->bv_page + \
        len = min(min(n, (size_t)(p->bv_len - skip)), \
                  (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
        len -= left; \
        if (skip == p->bv_len) { \
        n -= len; \
        i->iov_offset = skip; \
        loff_t start = i->xarray_start + i->iov_offset; \
        XA_STATE(xas, i->xarray, index); \
        len = PAGE_SIZE - offset_in_page(start); \
        len -= left; \
        n -= len; \
        i->iov_offset += __off; \
        if (unlikely(i->count < n)) \
                n = i->count; \
                i->ubuf, (I)) \
        i->nr_segs -= iov - iter_iov(i); \
        i->__iov = iov; \
        const struct bio_vec *bvec = i->bvec; \
        i->nr_segs -= bvec - i->bvec; \
        i->bvec = bvec; \
        const struct kvec *kvec = i->kvec; \
        i->nr_segs -= kvec - i->kvec; \
        i->kvec = kvec; \
        i->count -= n; \
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @size: maximum length
 * @size. For each iovec, fault in each page that constitutes the iovec.
 * Always returns 0 for non-userspace iterators.
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
        size_t n = min(size, iov_iter_count(i));
        n -= fault_in_readable(i->ubuf + i->iov_offset, n);
        return size - n;
        size_t count = min(size, iov_iter_count(i));
        size -= count;
        for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
                size_t len = min(count, p->iov_len - skip);
                ret = fault_in_readable(p->iov_base + skip, len);
                count -= len - ret;
        return count + size;
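
/*
 * Usage sketch (hypothetical helper, assumes <linux/uio.h>): the
 * generic_perform_write() style prefault. The return value is the number
 * of bytes NOT faulted in, so equality with the request length means the
 * user buffer is unusable.
 */
static int example_prefault_source(struct iov_iter *from, size_t bytes)
{
        if (unlikely(fault_in_iov_iter_readable(from, bytes) == bytes))
                return -EFAULT;
        /*
         * The first atomic copy from 'from' is now likely to succeed;
         * callers still retry on a short copy, since prefaulted pages
         * can be reclaimed before the copy runs.
         */
        return 0;
}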
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @size: maximum length
 * hardware page faults. This is primarily useful when we already know that
 * Always returns 0 for non-user-space iterators.
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
        size_t n = min(size, iov_iter_count(i));
        n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
        return size - n;
        size_t count = min(size, iov_iter_count(i));
        size -= count;
        for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
                size_t len = min(count, p->iov_len - skip);
                ret = fault_in_safe_writeable(p->iov_base + skip, len);
                count -= len - ret;
        return count + size;
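
/*
 * Usage sketch (hypothetical retry loop, assumes <linux/uio.h> and
 * <linux/uaccess.h>), modeled on the filesystem direct-read pattern this
 * helper exists for: issue the read with page faults disabled, and on
 * -EFAULT fault the destination in writeably and retry.
 */
extern ssize_t example_do_read(struct kiocb *iocb, struct iov_iter *to); /* hypothetical */

static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
        ssize_t ret;

        do {
                pagefault_disable();
                ret = example_do_read(iocb, to);
                pagefault_enable();
                /* 0 from fault_in_... means the window is fully resident */
        } while (ret == -EFAULT &&
                 fault_in_iov_iter_writeable(to, PAGE_SIZE) == 0);
        return ret;
}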
        if (WARN_ON_ONCE(i->data_source))
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * byte-by-byte until the fault happens again. Re-triggering machine
 * alignment and poison alignment assumptions to avoid re-triggering
        if (WARN_ON_ONCE(i->data_source))
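
/*
 * Usage sketch (hypothetical dax-style read, assumes <linux/uio.h> and
 * CONFIG_ARCH_HAS_COPY_MC): a short return from the machine-check-aware
 * copy means poison was consumed mid-copy, which the read path turns into
 * -EIO as the kernel-doc above describes.
 */
static ssize_t example_dax_read(const void *kaddr, size_t len,
                                struct iov_iter *to)
{
        size_t copied = _copy_mc_to_iter(kaddr, len, to);

        return copied == len ? copied : -EIO;
}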
                              size_t size)
                return (void *)copy_mc_to_kernel(to, from, size);
        return memcpy(to, from, size);
        if (WARN_ON_ONCE(!i->data_source))
        if (WARN_ON_ONCE(!i->data_source))
 * _copy_from_iter_flushcache - write destination through cpu cache
 * The pmem driver arranges for filesystem-dax to use this facility via
 * instructions that strand dirty-data in the cache.
        if (WARN_ON_ONCE(!i->data_source))
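
/*
 * Usage sketch (hypothetical pmem-style ->copy_from_iter(), assumes
 * <linux/uio.h>): writes reach the medium through the CPU cache, so no
 * dirty line is left stranded in front of persistent memory.
 */
static size_t example_pmem_copy_from_iter(void *pmem_addr, size_t bytes,
                                          struct iov_iter *i)
{
        return _copy_from_iter_flushcache(pmem_addr, bytes, i);
}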
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
        struct page *head;
         * The general case needs to access the page order in order
         * to compute the page size.
         * However, we mostly deal with order-0 pages and thus can
         * page orders.
        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
        if (!page_copy_sane(page, offset, bytes))
        if (WARN_ON_ONCE(i->data_source))
        page += offset / PAGE_SIZE; // first subpage
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
                bytes -= n;
                page++;
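
/*
 * Usage sketch (hypothetical read-side helper, assumes <linux/uio.h>):
 * copy one page-cache page out to the caller's iterator.
 * copy_page_to_iter() maps highmem pages subpage-by-subpage internally
 * and returns the bytes copied, short only if the destination faults.
 */
static ssize_t example_send_page(struct page *page, size_t offset,
                                 size_t len, struct iov_iter *to)
{
        size_t copied = copy_page_to_iter(page, offset, len, to);

        return copied ? copied : -EFAULT;
}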
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
        if (!page_copy_sane(page, offset, bytes))
        if (WARN_ON_ONCE(i->data_source))
        page += offset / PAGE_SIZE; // first subpage
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
                bytes -= n;
                page++;
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
        if (!page_copy_sane(page, offset, bytes))
        page += offset / PAGE_SIZE; // first subpage
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
                bytes -= n;
                page++;
size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
                PageHighMem(page);
        if (!page_copy_sane(page, offset, bytes))
        if (WARN_ON_ONCE(!i->data_source))
                n = bytes - copied;
                page += offset / PAGE_SIZE;
                n = min_t(size_t, n, PAGE_SIZE - offset);
                p = kmap_atomic(page) + offset;
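
/*
 * Usage sketch (hypothetical caller, assumes <linux/uio.h>): the
 * buffered-write loop around the atomic copy above. The copy itself
 * cannot take a page fault (kmap_atomic() disables them), so a zero
 * return is handled by prefaulting the source and retrying rather than
 * failing the write outright.
 */
static ssize_t example_fill_page(struct page *page, size_t offset,
                                 size_t bytes, struct iov_iter *from)
{
        size_t copied;

        do {
                copied = copy_page_from_iter_atomic(page, offset, bytes, from);
                if (likely(copied))
                        return copied;
                /* nothing copied: fault the source in and try again */
        } while (fault_in_iov_iter_readable(from, bytes) != bytes);
        return -EFAULT;
}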
static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
        if (!i->count)
        i->count -= size;
        size += i->iov_offset;
        for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
                if (likely(size < bvec->bv_len))
                size -= bvec->bv_len;
        i->iov_offset = size;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
        if (!i->count)
        i->count -= size;
        size += i->iov_offset; // from beginning of current segment
        for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
                if (likely(size < iov->iov_len))
                size -= iov->iov_len;
        i->iov_offset = size;
        i->nr_segs -= iov - iter_iov(i);
        i->__iov = iov;
void iov_iter_advance(struct iov_iter *i, size_t size)
        if (unlikely(i->count < size))
                size = i->count;
                i->iov_offset += size;
                i->count -= size;
                iov_iter_iovec_advance(i, size);
                iov_iter_bvec_advance(i, size);
                i->count -= size;
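
/*
 * Usage sketch (hypothetical, assumes <linux/uio.h>): advance past what a
 * device consumed, then undo the consumption with iov_iter_revert() when
 * the operation has to be replayed. Advancing is clamped to i->count, and
 * the revert walks segments backwards, as the code around here shows.
 */
static void example_consume(struct iov_iter *i, size_t done, bool replay)
{
        iov_iter_advance(i, done);
        if (replay)
                iov_iter_revert(i, done);
}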
        i->count += unroll;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
        unroll -= i->iov_offset;
                const struct bio_vec *bvec = i->bvec;
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                        unroll -= n;
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                                i->__iov = iov;
                                i->iov_offset = n - unroll;
                        unroll -= n;
        if (i->nr_segs > 1) {
                        return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        return i->count;
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @count: The size of the I/O buffer in bytes.
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @count: The size of the I/O buffer in bytes.
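
/*
 * Usage sketch (hypothetical, assumes <linux/uio.h> and <linux/pagemap.h>):
 * how the two constructors documented above are typically invoked. The
 * xarray flavour iterates straight over the page cache (mapping->i_pages),
 * as network filesystems do on reads; the discard flavour is a byte sink
 * for data that must be consumed but not stored.
 */
static void example_init_iters(struct address_space *mapping, loff_t start,
                               size_t count)
{
        struct iov_iter dest, sink;

        iov_iter_xarray(&dest, ITER_DEST, &mapping->i_pages, start, count);
        iov_iter_discard(&sink, ITER_DEST, count);
}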
        size_t size = i->count;
        size_t skip = i->iov_offset;
        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = iov->iov_len - skip;
                if (len > size)
                        len = size;
                if ((unsigned long)(iov->iov_base + skip) & addr_mask)
                size -= len;
                if (!size)
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;
                if (len > size)
                        len = size;
                if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
                size -= len;
                if (!size)
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
        if (i->count & len_mask)
        if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
        if (i->count & len_mask)
        if ((i->xarray_start + i->iov_offset) & addr_mask)
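
/*
 * Usage sketch (hypothetical, assumes <linux/uio.h>): the typical
 * direct-I/O gate. Both masks are "these bits must be zero" masks, so a
 * power-of-two logical block size becomes blksize - 1 for both the
 * address and the length checks.
 */
static bool example_dio_ok(const struct iov_iter *i, unsigned int blksize)
{
        return iov_iter_is_aligned(i, blksize - 1, blksize - 1);
}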
        size_t size = i->count;
        size_t skip = i->iov_offset;
        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = iov->iov_len - skip;
                res |= (unsigned long)iov->iov_base + skip;
                if (len > size)
                        len = size;
                size -= len;
                if (!size)
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;
                res |= (unsigned long)i->bvec[k].bv_offset + skip;
                if (len > size)
                        len = size;
                size -= len;
                if (!size)
        size_t size = i->count;
        if (size)
                return ((unsigned long)i->ubuf + i->iov_offset) | size;
        return (i->xarray_start + i->iov_offset) | i->count;
        size_t size = i->count;
        for (k = 0; k < i->nr_segs; k++) {
                if (iov->iov_len) {
                        unsigned long base = (unsigned long)iov->iov_base;
                        v = base + iov->iov_len;
                        if (size <= iov->iov_len)
                        size -= iov->iov_len;
static int want_pages_array(struct page ***res, size_t size,
        unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
                *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
        struct page *page;
        for (page = xas_load(&xas); page; page = xas_next(&xas)) {
                if (xas_retry(&xas, page))
                /* Has the page moved or been split? */
                if (unlikely(page != xas_reload(&xas))) {
                pages[ret] = find_subpage(page, xas.xa_index);
                                   struct page ***pages, size_t maxsize,
        pos = i->xarray_start + i->iov_offset;
                return -ENOMEM;
        nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
        maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
        i->iov_offset += maxsize;
        i->count -= maxsize;
/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
                return (unsigned long)i->ubuf + i->iov_offset;
        for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
                size_t len = iov->iov_len - skip;
                if (*size > len)
                        *size = len;
                return (unsigned long)iov->iov_base + skip;
/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
                                       size_t *size, size_t *start)
        struct page *page;
        size_t skip = i->iov_offset, len;
        len = i->bvec->bv_len - skip;
        if (*size > len)
                *size = len;
        skip += i->bvec->bv_offset;
        page = i->bvec->bv_page + skip / PAGE_SIZE;
        return page;
                                   struct page ***pages, size_t maxsize,
        if (maxsize > i->count)
                maxsize = i->count;
                if (i->nofault)
                        return -ENOMEM;
                maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
                struct page **p;
                struct page *page;
                page = first_bvec_segment(i, &maxsize, start);
                        return -ENOMEM;
                        get_page(p[k] = page + k);
                maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
                i->count -= maxsize;
                i->iov_offset += maxsize;
                if (i->iov_offset == i->bvec->bv_len) {
                        i->iov_offset = 0;
                        i->bvec++;
                        i->nr_segs--;
        return -EFAULT;
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
                struct page ***pages, size_t maxsize, size_t *start)
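
/*
 * Usage sketch (hypothetical, assumes <linux/uio.h>): the usual calling
 * pattern for the page-getting API above. On success the iterator has
 * already been advanced; each returned page carries a reference to drop
 * with put_page() once the I/O completes, and the allocated array is
 * freed with kvfree().
 */
static ssize_t example_grab_pages(struct iov_iter *i, size_t maxsize)
{
        struct page **pages = NULL;
        size_t start;
        ssize_t bytes;

        bytes = iov_iter_get_pages_alloc2(i, &pages, maxsize, &start);
        if (bytes <= 0)
                return bytes;
        /* ... issue I/O over 'pages'; data begins at 'start' in pages[0] ... */
        kvfree(pages);
        return bytes;
}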
        if (WARN_ON_ONCE(!i->data_source))
        if (WARN_ON_ONCE(i->data_source))
                // can't use csum_memcpy() for that one - data is not copied
                csstate->csum = csum_block_add(csstate->csum,
                                               csstate->off);
                csstate->off += bytes;
        sum = csum_shift(csstate->csum, csstate->off);
        csstate->csum = csum_shift(sum, csstate->off);
        csstate->off += bytes;
        size_t skip = i->iov_offset, size = i->count;
        for (p = iter_iov(i); size; skip = 0, p++) {
                unsigned offs = offset_in_page(p->iov_base + skip);
                size_t len = min(p->iov_len - skip, size);
                size -= len;
        size_t skip = i->iov_offset, size = i->count;
        for (p = i->bvec; size; skip = 0, p++) {
                unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
                size_t len = min(p->bv_len - skip, size);
                size -= len;
        if (unlikely(!i->count))
                unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
                int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
                unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
                int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
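
/*
 * Usage sketch (hypothetical, assumes <linux/bio.h>): size a page array
 * or bio for the next chunk of the iterator. iov_iter_npages() already
 * clamps to both i->count and the maxpages argument.
 */
static int example_bio_pages(const struct iov_iter *i)
{
        return iov_iter_npages(i, BIO_MAX_VECS);
}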
                return new->bvec = kmemdup(new->bvec,
                                new->nr_segs * sizeof(struct bio_vec),
                return new->__iov = kmemdup(new->__iov,
                                new->nr_segs * sizeof(struct iovec),
        int ret = -EFAULT, i;
                return -EFAULT;
                        ret = -EINVAL;
        int ret = -EFAULT;
                return -EFAULT;
                unsafe_get_user(len, &uiov->iov_len, uaccess_end);
                unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
                        ret = -EINVAL;
                iov->iov_base = buf;
                iov->iov_len = len;
        } while (--nr_segs);
                return ERR_PTR(-EINVAL);
                return ERR_PTR(-ENOMEM);
        ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
        return i->count;
                return -EFAULT;
                if (len > MAX_RW_COUNT - total_len) {
                        len = MAX_RW_COUNT - total_len;
 * import_iovec() - Copy an array of &struct iovec from userspace
 * on-stack) kernel array.
 * on-stack array was used or not (and regardless of whether this function
                return -EFAULT;
                return -EFAULT;
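
/*
 * Usage sketch (hypothetical readv-style path, assumes <linux/uio.h> and
 * <linux/slab.h>): the documented import_iovec() contract in action.
 * 'iov' starts as a small on-stack array; on return it is either NULL
 * (the stack array sufficed) or a heap copy, so kfree() is always safe,
 * exactly as the kernel-doc above requires.
 */
static ssize_t example_readv(struct file *file, const struct iovec __user *vec,
                             unsigned long nr_segs, loff_t *pos)
{
        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(ITER_DEST, vec, nr_segs, UIO_FASTIOV, &iov, &iter);
        if (ret < 0)
                return ret;
        /* ... ret = vfs_iter_read(file, &iter, pos, 0); ... */
        kfree(iov);
        return ret;
}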
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
        i->iov_offset = state->iov_offset;
        i->count = state->count;
         * For the *vec iters, nr_segs + iov is constant - if we increment
         * size, so we can just increment the iov pointer as they are unionized.
         * ITER_BVEC _may_ be the same size on some archs, but on others it is
                i->bvec -= state->nr_segs - i->nr_segs;
                i->__iov -= state->nr_segs - i->nr_segs;
        i->nr_segs = state->nr_segs;
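
/*
 * Usage sketch (hypothetical, assumes <linux/uio.h>): the save/restore
 * pairing this helper exists for, in the io_uring resubmission style.
 * The state is snapshotted before issuing and rolled back if the request
 * must be replayed from the start.
 */
extern ssize_t example_submit(struct kiocb *iocb, struct iov_iter *i); /* hypothetical */

static ssize_t example_issue_with_retry(struct kiocb *iocb, struct iov_iter *i)
{
        struct iov_iter_state state;
        ssize_t ret;

        iov_iter_save_state(i, &state);
        ret = example_submit(iocb, i);
        if (ret == -EAGAIN)
                iov_iter_restore(i, &state);
        return ret;
}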
                                             struct page ***pages, size_t maxsize,
        struct page *page, **p;
        loff_t pos = i->xarray_start + i->iov_offset;
        XA_STATE(xas, i->xarray, index);
                return -ENOMEM;
        for (page = xas_load(&xas); page; page = xas_next(&xas)) {
                if (xas_retry(&xas, page))
                /* Has the page moved or been split? */
                if (unlikely(page != xas_reload(&xas))) {
                p[nr++] = find_subpage(page, xas.xa_index);
        maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
                                           struct page ***pages, size_t maxsize,
        struct page **p, *page;
        size_t skip = i->iov_offset, offset, size;
                if (i->nr_segs == 0)
                size = min(maxsize, i->bvec->bv_len - skip);
                if (size)
                i->iov_offset = 0;
                i->nr_segs--;
                i->bvec++;
        skip += i->bvec->bv_offset;
        page = i->bvec->bv_page + skip / PAGE_SIZE;
        maxpages = want_pages_array(pages, size, offset, maxpages);
                return -ENOMEM;
                p[k] = page + k;
        size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
        iov_iter_advance(i, size);
        return size;
                                           struct page ***pages, size_t maxsize,
        struct page **p, *page;
        size_t skip = i->iov_offset, offset, len, size;
                if (i->nr_segs == 0)
                size = min(maxsize, i->kvec->iov_len - skip);
                if (size)
                i->iov_offset = 0;
                i->nr_segs--;
                i->kvec++;
        kaddr = i->kvec->iov_base + skip;
        maxpages = want_pages_array(pages, size, offset, maxpages);
                return -ENOMEM;
        kaddr -= offset;
        len = offset + size;
                        page = vmalloc_to_page(kaddr);
                        page = virt_to_page(kaddr);
                p[k] = page;
                len -= seg;
        size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
        iov_iter_advance(i, size);
        return size;
 * each of them. This should only be used if the iterator is user-backed
 * child a copy of the page.
                                           struct page ***pages,
        if (i->data_source == ITER_DEST)
        if (i->nofault)
                return -ENOMEM;
        maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
 * @maxpages: The maximum size of the list of pages
 * of page contents can be set.
 * If *@pages is NULL, a page list will be allocated to the required size and
 * that the caller allocated a page list at least @maxpages in size and this
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
 * sets *offset0 to the offset into the first page.
 * It may also return -ENOMEM and -EFAULT.
ssize_t iov_iter_extract_pages(struct iov_iter *i,
                               struct page ***pages,
        maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
        return -EFAULT;
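
/*
 * Usage sketch (hypothetical, assumes <linux/uio.h> and <linux/mm.h>):
 * extract pages for a DMA transfer and release them per the rules in the
 * kernel-doc above. Whether the pages were pinned (user-backed iterators)
 * or merely listed (kvec/bvec/xarray) is queried with
 * iov_iter_extract_will_pin().
 */
static ssize_t example_extract_for_dma(struct iov_iter *i,
                                       struct page **pages,
                                       unsigned int maxpages, size_t maxsize)
{
        size_t offset0;
        ssize_t len;

        len = iov_iter_extract_pages(i, &pages, maxsize, maxpages, 0, &offset0);
        if (len <= 0)
                return len;
        /* ... map for DMA; data starts at offset0 within pages[0] ... */
        if (iov_iter_extract_will_pin(i))
                unpin_user_pages(pages, DIV_ROUND_UP(offset0 + len, PAGE_SIZE));
        return len;
}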