xref: /openbmc/linux/include/linux/uio.h (revision 1b030698)
12874c5fdSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *	Berkeley style UIO structures	-	Alan Cox 1994.
41da177e4SLinus Torvalds  */
5607ca46eSDavid Howells #ifndef __LINUX_UIO_H
6607ca46eSDavid Howells #define __LINUX_UIO_H
71da177e4SLinus Torvalds 
892236878SKent Overstreet #include <linux/kernel.h>
9aa28de27SAl Viro #include <linux/thread_info.h>
10d9c19d32SMatthew Wilcox (Oracle) #include <linux/mm_types.h>
11607ca46eSDavid Howells #include <uapi/linux/uio.h>
121da177e4SLinus Torvalds 
1392236878SKent Overstreet struct page;
14812ed032SJiri Slaby 
15f62e52d1SDavid Howells typedef unsigned int __bitwise iov_iter_extraction_t;
16f62e52d1SDavid Howells 
/* Kernel-internal scatter/gather segment: like struct iovec, but the base
 * address is always a kernel pointer. */
struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};
21812ed032SJiri Slaby 
/*
 * The flavours of iov_iter, stored in iov_iter::iter_type and queried via
 * iov_iter_type() and the iter_is_*()/iov_iter_is_*() predicates below.
 */
enum iter_type {
	/* iter types */
	ITER_IOVEC,	/* array of user-space struct iovec */
	ITER_KVEC,	/* array of kernel-space struct kvec */
	ITER_BVEC,	/* array of struct bio_vec */
	ITER_XARRAY,	/* pages held in an xarray (see iov_iter_xarray()) */
	ITER_DISCARD,	/* no backing store (see iov_iter_discard()) */
	ITER_UBUF,	/* single contiguous user buffer (see iov_iter_ubuf()) */
};
3162a8067aSAl Viro 
32de4eda9dSAl Viro #define ITER_SOURCE	1	// == WRITE
33de4eda9dSAl Viro #define ITER_DEST	0	// == READ
34de4eda9dSAl Viro 
/*
 * Snapshot of an iov_iter's position, captured by iov_iter_save_state()
 * and rewound to by iov_iter_restore().
 */
struct iov_iter_state {
	size_t iov_offset;	/* saved iov_iter::iov_offset */
	size_t count;		/* saved iov_iter::count */
	unsigned long nr_segs;	/* saved iov_iter::nr_segs */
};
408fb0f47aSJens Axboe 
struct iov_iter {
	u8 iter_type;		/* enum iter_type */
	bool copy_mc;		/* machine-check-safe copying requested (CONFIG_ARCH_HAS_COPY_MC) */
	bool nofault;		/* NOTE(review): presumably suppresses faulting-in of pages — confirm in lib/iov_iter.c */
	bool data_source;	/* true when a data source (WRITE), see iov_iter_rw() */
	bool user_backed;	/* memory is user-space-backed, see user_backed_iter() */
	union {
		size_t iov_offset;	/* byte offset into the current segment */
		int last_offset;
	};
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;	/* bytes remaining, see iov_iter_count() */
		};
	};
	union {
		unsigned long nr_segs;	/* number of segments (iovec/kvec/bvec) */
		loff_t xarray_start;	/* start position of the data in the xarray */
	};
};
8592236878SKent Overstreet 
/*
 * Return the current iovec of @iter.  For ITER_UBUF the embedded
 * single-entry __ubuf_iovec is returned, so UBUF and IOVEC iterators
 * can be handled uniformly by callers.
 */
static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}
92747b1f65SJens Axboe 
/* Address of, and bytes left in, the current position within the current vec. */
#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)
95de4f5fedSJens Axboe 
/* Which flavour of iterator @i is (see enum iter_type). */
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}
10000e23707SDavid Howells 
iov_iter_save_state(struct iov_iter * iter,struct iov_iter_state * state)1018fb0f47aSJens Axboe static inline void iov_iter_save_state(struct iov_iter *iter,
1028fb0f47aSJens Axboe 				       struct iov_iter_state *state)
1038fb0f47aSJens Axboe {
1048fb0f47aSJens Axboe 	state->iov_offset = iter->iov_offset;
1058fb0f47aSJens Axboe 	state->count = iter->count;
1068fb0f47aSJens Axboe 	state->nr_segs = iter->nr_segs;
1078fb0f47aSJens Axboe }
1088fb0f47aSJens Axboe 
iter_is_ubuf(const struct iov_iter * i)109fcb14cb1SAl Viro static inline bool iter_is_ubuf(const struct iov_iter *i)
110fcb14cb1SAl Viro {
111fcb14cb1SAl Viro 	return iov_iter_type(i) == ITER_UBUF;
112fcb14cb1SAl Viro }
113fcb14cb1SAl Viro 
iter_is_iovec(const struct iov_iter * i)11400e23707SDavid Howells static inline bool iter_is_iovec(const struct iov_iter *i)
11500e23707SDavid Howells {
11600e23707SDavid Howells 	return iov_iter_type(i) == ITER_IOVEC;
11700e23707SDavid Howells }
11800e23707SDavid Howells 
iov_iter_is_kvec(const struct iov_iter * i)11900e23707SDavid Howells static inline bool iov_iter_is_kvec(const struct iov_iter *i)
12000e23707SDavid Howells {
12100e23707SDavid Howells 	return iov_iter_type(i) == ITER_KVEC;
12200e23707SDavid Howells }
12300e23707SDavid Howells 
iov_iter_is_bvec(const struct iov_iter * i)12400e23707SDavid Howells static inline bool iov_iter_is_bvec(const struct iov_iter *i)
12500e23707SDavid Howells {
12600e23707SDavid Howells 	return iov_iter_type(i) == ITER_BVEC;
12700e23707SDavid Howells }
12800e23707SDavid Howells 
iov_iter_is_discard(const struct iov_iter * i)1299ea9ce04SDavid Howells static inline bool iov_iter_is_discard(const struct iov_iter *i)
1309ea9ce04SDavid Howells {
1319ea9ce04SDavid Howells 	return iov_iter_type(i) == ITER_DISCARD;
1329ea9ce04SDavid Howells }
1339ea9ce04SDavid Howells 
iov_iter_is_xarray(const struct iov_iter * i)1347ff50620SDavid Howells static inline bool iov_iter_is_xarray(const struct iov_iter *i)
1357ff50620SDavid Howells {
1367ff50620SDavid Howells 	return iov_iter_type(i) == ITER_XARRAY;
1377ff50620SDavid Howells }
1387ff50620SDavid Howells 
/*
 * Data direction of the iterator: WRITE when it is a data source
 * (ITER_SOURCE) and READ when it is a data destination (ITER_DEST).
 */
static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}
14300e23707SDavid Howells 
user_backed_iter(const struct iov_iter * i)144fcb14cb1SAl Viro static inline bool user_backed_iter(const struct iov_iter *i)
145fcb14cb1SAl Viro {
146fcb14cb1SAl Viro 	return i->user_backed;
147fcb14cb1SAl Viro }
148fcb14cb1SAl Viro 
1491da177e4SLinus Torvalds /*
1501da177e4SLinus Torvalds  * Total number of bytes covered by an iovec.
1511da177e4SLinus Torvalds  *
1521da177e4SLinus Torvalds  * NOTE that it is not safe to use this function until all the iovec's
1531da177e4SLinus Torvalds  * segment lengths have been validated.  Because the individual lengths can
1541da177e4SLinus Torvalds  * overflow a size_t when added together.
1551da177e4SLinus Torvalds  */
iov_length(const struct iovec * iov,unsigned long nr_segs)1561da177e4SLinus Torvalds static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
1571da177e4SLinus Torvalds {
1581da177e4SLinus Torvalds 	unsigned long seg;
1591da177e4SLinus Torvalds 	size_t ret = 0;
1601da177e4SLinus Torvalds 
1611da177e4SLinus Torvalds 	for (seg = 0; seg < nr_segs; seg++)
1621da177e4SLinus Torvalds 		ret += iov[seg].iov_len;
1631da177e4SLinus Torvalds 	return ret;
1641da177e4SLinus Torvalds }
1651da177e4SLinus Torvalds 
166*1b030698SMatthew Wilcox (Oracle) size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
167f0b65f39SAl Viro 				  size_t bytes, struct iov_iter *i);
16892236878SKent Overstreet void iov_iter_advance(struct iov_iter *i, size_t bytes);
16927c0e374SAl Viro void iov_iter_revert(struct iov_iter *i, size_t bytes);
170a6294593SAndreas Gruenbacher size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
171cdd591fcSAndreas Gruenbacher size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
17292236878SKent Overstreet size_t iov_iter_single_seg_count(const struct iov_iter *i);
1736e58e79dSAl Viro size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
1746e58e79dSAl Viro 			 struct iov_iter *i);
175f0d1bec9SAl Viro size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
176f0d1bec9SAl Viro 			 struct iov_iter *i);
177aa28de27SAl Viro 
178aa28de27SAl Viro size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
179aa28de27SAl Viro size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
180aa28de27SAl Viro size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
181aa28de27SAl Viro 
/*
 * Copy @bytes bytes of @folio, starting at @offset, into the iterator;
 * delegates to copy_page_to_iter() on the folio's first page.
 */
static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}
187*1b030698SMatthew Wilcox (Oracle) 
/*
 * Copy @bytes bytes from the iterator into @folio at @offset; the atomic
 * variant, delegating to copy_page_from_iter_atomic() on the first page.
 */
static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
		size_t offset, size_t bytes, struct iov_iter *i)
{
	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
}
193*1b030698SMatthew Wilcox (Oracle) 
1944f80818bSLorenzo Stoakes size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
1954f80818bSLorenzo Stoakes 				 size_t bytes, struct iov_iter *i);
196d9c19d32SMatthew Wilcox (Oracle) 
/*
 * Copy @bytes from kernel buffer @addr into the iterator.  Returns
 * _copy_to_iter()'s result, or 0 without copying anything if
 * check_copy_size() rejects the copy (hardened usercopy check).
 */
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}
204aa28de27SAl Viro 
/*
 * Copy @bytes from the iterator into kernel buffer @addr.  Returns
 * _copy_from_iter()'s result, or 0 without copying anything if
 * check_copy_size() rejects the copy (hardened usercopy check).
 */
static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}
212aa28de27SAl Viro 
213aa28de27SAl Viro static __always_inline __must_check
copy_from_iter_full(void * addr,size_t bytes,struct iov_iter * i)214aa28de27SAl Viro bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
215aa28de27SAl Viro {
2164b6c132bSAl Viro 	size_t copied = copy_from_iter(addr, bytes, i);
2174b6c132bSAl Viro 	if (likely(copied == bytes))
2184b6c132bSAl Viro 		return true;
2194b6c132bSAl Viro 	iov_iter_revert(i, copied);
220aa28de27SAl Viro 	return false;
221aa28de27SAl Viro }
222aa28de27SAl Viro 
/*
 * As copy_from_iter(), but delegating to _copy_from_iter_nocache();
 * returns 0 without copying if check_copy_size() rejects the copy.
 */
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}
230aa28de27SAl Viro 
231aa28de27SAl Viro static __always_inline __must_check
copy_from_iter_full_nocache(void * addr,size_t bytes,struct iov_iter * i)232aa28de27SAl Viro bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
233aa28de27SAl Viro {
2344b6c132bSAl Viro 	size_t copied = copy_from_iter_nocache(addr, bytes, i);
2354b6c132bSAl Viro 	if (likely(copied == bytes))
2364b6c132bSAl Viro 		return true;
2374b6c132bSAl Viro 	iov_iter_revert(i, copied);
238aa28de27SAl Viro 	return false;
239aa28de27SAl Viro }
240aa28de27SAl Viro 
2410aed55afSDan Williams #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
2420aed55afSDan Williams /*
2430aed55afSDan Williams  * Note, users like pmem that depend on the stricter semantics of
244e17f7a0bSChristoph Hellwig  * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
2450aed55afSDan Williams  * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
2460aed55afSDan Williams  * destination is flushed from the cache on return.
2470aed55afSDan Williams  */
2486a37e940SLinus Torvalds size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
2490aed55afSDan Williams #else
2506a37e940SLinus Torvalds #define _copy_from_iter_flushcache _copy_from_iter_nocache
2510aed55afSDan Williams #endif
2526a37e940SLinus Torvalds 
#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
/* Request machine-check-safe copying for this iterator. */
static inline void iov_iter_set_copy_mc(struct iov_iter *i)
{
	i->copy_mc = true;
}

/* Was machine-check-safe copying requested for @i? */
static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return i->copy_mc;
}
#else
/* No arch support: the MC-safe copy degrades to a plain _copy_to_iter(). */
#define _copy_mc_to_iter _copy_to_iter
static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return false;
}
#endif
2728780356eSDan Williams 
273c35e0248SMatthew Wilcox size_t iov_iter_zero(size_t bytes, struct iov_iter *);
274cfa320f7SKeith Busch bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
275cfa320f7SKeith Busch 			unsigned len_mask);
276886a3911SAl Viro unsigned long iov_iter_alignment(const struct iov_iter *i);
277357f435dSAl Viro unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
278aa563d7bSDavid Howells void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
27971d8e532SAl Viro 			unsigned long nr_segs, size_t count);
280aa563d7bSDavid Howells void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
28105afcb77SAl Viro 			unsigned long nr_segs, size_t count);
282aa563d7bSDavid Howells void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
283abb78f87SAl Viro 			unsigned long nr_segs, size_t count);
2849ea9ce04SDavid Howells void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
2857ff50620SDavid Howells void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
2867ff50620SDavid Howells 		     loff_t start, size_t count);
287eba2d3d7SAl Viro ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
2882c80929cSMiklos Szeredi 			size_t maxsize, unsigned maxpages, size_t *start);
289eba2d3d7SAl Viro ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
29091f79c43SAl Viro 			size_t maxsize, size_t *start);
291f67da30cSAl Viro int iov_iter_npages(const struct iov_iter *i, int maxpages);
2928fb0f47aSJens Axboe void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
29392236878SKent Overstreet 
2944b8164b9SAl Viro const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
2954b8164b9SAl Viro 
/* Amount of data (in bytes) left in the iterator. */
static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}
30092236878SKent Overstreet 
/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for such.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.  Pairs with iov_iter_reexpand().
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
3180c949334SAl Viro 
/*
 * Reexpand a previously truncated iterator (see iov_iter_truncate());
 * count must be no more than how much we had shrunk it by.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
32752cbd23aSWillem de Bruijn 
/*
 * Like iov_iter_npages(), but computed as if the iterator held at most
 * @max_bytes: temporarily truncate, count the pages, then re-expand so
 * the iterator is left exactly as it was found.
 */
static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t trimmed = 0;
	int ret;

	if (iov_iter_count(i) > max_bytes) {
		trimmed = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	ret = iov_iter_npages(i, maxpages);
	if (trimmed)
		iov_iter_reexpand(i, iov_iter_count(i) + trimmed);

	return ret;
}
344b93235e6SJakub Kicinski 
/* Carry state for csum_and_copy_to_iter(). */
struct csum_state {
	__wsum csum;	/* running checksum accumulated so far */
	size_t off;	/* NOTE(review): offset the checksum starts at - confirm against lib/iov_iter.c */
};
34952cbd23aSWillem de Bruijn 
35052cbd23aSWillem de Bruijn size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
351a604ec7eSAl Viro size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
3524b6c132bSAl Viro 
3534b6c132bSAl Viro static __always_inline __must_check
csum_and_copy_from_iter_full(void * addr,size_t bytes,__wsum * csum,struct iov_iter * i)3544b6c132bSAl Viro bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
3554b6c132bSAl Viro 				  __wsum *csum, struct iov_iter *i)
3564b6c132bSAl Viro {
3574b6c132bSAl Viro 	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
3584b6c132bSAl Viro 	if (likely(copied == bytes))
3594b6c132bSAl Viro 		return true;
3604b6c132bSAl Viro 	iov_iter_revert(i, copied);
3614b6c132bSAl Viro 	return false;
3624b6c132bSAl Viro }
363d05f4435SSagi Grimberg size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
364d05f4435SSagi Grimberg 		struct iov_iter *i);
365b42b15fdSAl Viro 
366bfdc5970SChristoph Hellwig struct iovec *iovec_from_user(const struct iovec __user *uvector,
367bfdc5970SChristoph Hellwig 		unsigned long nr_segs, unsigned long fast_segs,
368bfdc5970SChristoph Hellwig 		struct iovec *fast_iov, bool compat);
369bfdc5970SChristoph Hellwig ssize_t import_iovec(int type, const struct iovec __user *uvec,
370bfdc5970SChristoph Hellwig 		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
371bfdc5970SChristoph Hellwig 		 struct iov_iter *i);
372bfdc5970SChristoph Hellwig ssize_t __import_iovec(int type, const struct iovec __user *uvec,
373bfdc5970SChristoph Hellwig 		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
374bfdc5970SChristoph Hellwig 		 struct iov_iter *i, bool compat);
375bc917be8SAl Viro int import_single_range(int type, void __user *buf, size_t len,
376bc917be8SAl Viro 		 struct iovec *iov, struct iov_iter *i);
3772ad9bd83SJens Axboe int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
378bc917be8SAl Viro 
/*
 * Initialise @i as an ITER_UBUF iterator over the single user buffer
 * @buf of @count bytes.  @direction is ITER_SOURCE or ITER_DEST
 * (== WRITE / READ); all fields not listed are zero-initialised by the
 * compound literal.
 */
static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
			void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.copy_mc = false,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}
393f62e52d1SDavid Howells /* Flags for iov_iter_get/extract_pages*() */
394f62e52d1SDavid Howells /* Allow P2PDMA on the extracted pages */
395f62e52d1SDavid Howells #define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)
396f62e52d1SDavid Howells 
3977d58fe73SDavid Howells ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
3987d58fe73SDavid Howells 			       size_t maxsize, unsigned int maxpages,
3997d58fe73SDavid Howells 			       iov_iter_extraction_t extraction_flags,
4007d58fe73SDavid Howells 			       size_t *offset0);
4017d58fe73SDavid Howells 
4027d58fe73SDavid Howells /**
4037d58fe73SDavid Howells  * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
4047d58fe73SDavid Howells  * @iter: The iterator
4057d58fe73SDavid Howells  *
4067d58fe73SDavid Howells  * Examine the iterator and indicate by returning true or false as to how, if
4077d58fe73SDavid Howells  * at all, pages extracted from the iterator will be retained by the extraction
4087d58fe73SDavid Howells  * function.
4097d58fe73SDavid Howells  *
4107d58fe73SDavid Howells  * %true indicates that the pages will have a pin placed in them that the
4107d58fe73SDavid Howells  * caller must unpin.  This must be done for DMA/async DIO to force fork()
4127d58fe73SDavid Howells  * to forcibly copy a page for the child (the parent must retain the original
4137d58fe73SDavid Howells  * page).
4147d58fe73SDavid Howells  *
4157d58fe73SDavid Howells  * %false indicates that no measures are taken and that it's up to the caller
4167d58fe73SDavid Howells  * to retain the pages.
4177d58fe73SDavid Howells  */
iov_iter_extract_will_pin(const struct iov_iter * iter)4187d58fe73SDavid Howells static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
4197d58fe73SDavid Howells {
4207d58fe73SDavid Howells 	return user_backed_iter(iter);
4217d58fe73SDavid Howells }
422fcb14cb1SAl Viro 
423f5f82cd1SDavid Howells struct sg_table;
424f5f82cd1SDavid Howells ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
425f5f82cd1SDavid Howells 			   struct sg_table *sgtable, unsigned int sg_max,
426f5f82cd1SDavid Howells 			   iov_iter_extraction_t extraction_flags);
427f5f82cd1SDavid Howells 
428812ed032SJiri Slaby #endif
429