/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool copy_mc;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		loff_t xarray_start;
	};
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)

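/*
 * Example (illustrative sketch, not part of the original header): for a
 * user_backed iterator, iter_iov_addr()/iter_iov_len() describe the
 * not-yet-consumed part of the current segment.  A hand-rolled copy of up
 * to 'want' bytes into a kernel buffer 'kbuf' (both hypothetical) could
 * look like:
 *
 *	void __user *p = iter_iov_addr(iter);
 *	size_t n = min_t(size_t, iter_iov_len(iter), want);
 *
 *	if (copy_from_user(kbuf, p, n))
 *		return -EFAULT;
 *	iov_iter_advance(iter, n);
 *
 * Real users normally go through copy_from_iter() and friends instead of
 * open-coding this.
 */
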
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}

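/*
 * Example (sketch, not part of the original header): save/restore lets a
 * caller retry an operation that may have partially consumed the iterator:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_io(iter);			// hypothetical consumer
 *	if (ret == -EAGAIN)
 *		iov_iter_restore(iter, &state);	// rewind to the saved point
 */
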
static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

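/*
 * Example (sketch, not part of the original header): iov_length() is
 * normally only called on an array whose lengths have already been
 * validated, e.g. one produced by import_iovec()/iovec_from_user():
 *
 *	struct iovec v[2] = {
 *		{ .iov_base = buf0, .iov_len = 16 },
 *		{ .iov_base = buf1, .iov_len = 32 },
 *	};
 *	size_t total = iov_length(v, 2);	// 48
 *
 * buf0/buf1 are hypothetical, already-checked user pointers.
 */
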
size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
		size_t offset, size_t bytes, struct iov_iter *i)
{
	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
}

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

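/*
 * Example (sketch, not part of the original header): copy_from_iter_full()
 * is the usual way to pull a fixed-size structure out of an iterator,
 * since it puts the data back (reverts) on a short copy:
 *
 *	struct my_hdr hdr;	// hypothetical fixed-size header
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 *	// on success the iterator has advanced by sizeof(hdr)
 */
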
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on _copy_from_iter_flushcache() having
 * stricter semantics than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

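/*
 * Example (sketch, not part of the original header): a pmem-style user
 * gating on the stricter semantics before relying on them:
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;	// hypothetical fallback policy
 *	copied = _copy_from_iter_flushcache(dst, bytes, iter);
 *
 * dst/bytes/iter are hypothetical; on success 'copied' is the number of
 * bytes actually transferred, as with _copy_from_iter().
 */
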
#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
static inline void iov_iter_set_copy_mc(struct iov_iter *i)
{
	i->copy_mc = true;
}

static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return i->copy_mc;
}
#else
#define _copy_mc_to_iter _copy_to_iter
static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return false;
}
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

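/*
 * Example (sketch, not part of the original header): describing a local
 * kernel buffer as the destination of a read-style operation:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *	// 'iter' now covers 'len' bytes of the hypothetical buffer 'kbuf'
 *	// and can be handed to e.g. a ->read_iter() style consumer.
 */
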
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing it a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

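/*
 * Example (sketch, not part of the original header): the usual pattern is
 * to truncate before an operation and reexpand by however much was shaved
 * off afterwards, exactly as iov_iter_npages_cap() below does:
 *
 *	size_t shorted = 0;
 *
 *	if (iov_iter_count(iter) > limit) {		// 'limit' is hypothetical
 *		shorted = iov_iter_count(iter) - limit;
 *		iov_iter_truncate(iter, limit);
 *	}
 *	ret = do_io(iter);				// hypothetical consumer
 *	iov_iter_reexpand(iter, iov_iter_count(iter) + shorted);
 */
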
static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);

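/*
 * Example (sketch, not part of the original header): the typical
 * readv()-style setup where the iovec array itself lives in userspace
 * ('uvec' and 'nr_segs' are hypothetical syscall arguments):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... consume 'iter' ...
 *	kfree(iov);	// NULL if the on-stack array was used
 */
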
static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
			void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.copy_mc = false,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}
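
/*
 * Example (sketch, not part of the original header): a single contiguous
 * user buffer, as seen from a plain read(2)-style path where the buffer
 * will be written to ('ubuf' and 'len' come from the hypothetical caller):
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_ubuf(&iter, ITER_DEST, ubuf, len);
 *
 * import_ubuf() above performs a similar initialization after validating
 * the user pointer.
 */
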
/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false as to how, if
 * at all, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO to force fork()
 * to forcibly copy a page for the child (the parent must retain the original
 * page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}

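/*
 * Example (sketch, not part of the original header): extracting pages for
 * DMA and releasing them according to iov_iter_extract_will_pin()
 * ('maxsize' and 'maxpages' are hypothetical caller limits):
 *
 *	struct page **pages = NULL;	// extraction allocates the array
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_extract_pages(iter, &pages, maxsize, maxpages, 0, &off);
 *	if (n < 0)
 *		return n;
 *	// ... map pages[] for DMA; data starts at offset 'off' in pages[0] ...
 *	if (iov_iter_extract_will_pin(iter))
 *		unpin_user_pages(pages, DIV_ROUND_UP(off + n, PAGE_SIZE));
 *	kvfree(pages);
 */
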
struct sg_table;
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
			   struct sg_table *sgtable, unsigned int sg_max,
			   iov_iter_extraction_t extraction_flags);

#endif