/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};
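
/*
 * Illustrative only (hypothetical locals): a kvec describes a buffer in
 * kernel address space, never a user one:
 *
 *	char kbuf[64];
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = sizeof(kbuf) };
 */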

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

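/*
 * Direction is from the iterator's point of view: an ITER_SOURCE
 * iterator supplies the data (the WRITE side), an ITER_DEST iterator
 * receives it (the READ side).  Illustrative only, using iov_iter_kvec()
 * declared below:
 *
 *	iov_iter_kvec(&iter, ITER_SOURCE, &kv, 1, len);	// data read out of kv
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);	// data written into kv
 */
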
struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
		void __user *ubuf;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}

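/*
 * Illustrative pairing (hypothetical consumer): snapshot the position,
 * attempt an operation, and roll back on failure with
 * iov_iter_restore(), declared further down:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = consume(iter);		// hypothetical
 *	if (ret < 0)
 *		iov_iter_restore(iter, &state);
 */
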
static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

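/*
 * Illustrative validation sketch (not part of this header): callers
 * typically bound each segment against a running cap such as
 * MAX_RW_COUNT so the sum cannot wrap a size_t:
 *
 *	size_t total = 0;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *			return -EINVAL;
 *		total += iov[seg].iov_len;
 *	}
 */
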
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

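/*
 * Illustrative sketch (hypothetical consumer): only meaningful for an
 * ITER_IOVEC iterator; walk it one segment at a time:
 *
 *	while (iov_iter_count(i)) {
 *		struct iovec v = iov_iter_iovec(i);
 *
 *		n = consume_segment(v.iov_base, v.iov_len); // hypothetical
 *		iov_iter_advance(i, n);
 *	}
 */
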
size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

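/*
 * Illustrative contrast (hypothetical locals): copy_from_iter() can
 * return a short count and leaves the iterator advanced by that much;
 * the _full variant reverts the iterator and reports all-or-nothing:
 *
 *	copied = copy_from_iter(&hdr, sizeof(hdr), iter);
 *	if (copied != sizeof(hdr))
 *		handle_partial(copied);		// hypothetical
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;			// iter left as it was
 */
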
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on _copy_from_iter_flushcache()
 * having stricter semantics than _copy_from_iter_nocache() must check
 * for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
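
/*
 * Sketch of the check described above (illustrative only, helper name
 * hypothetical):
 *
 *	copied = _copy_from_iter_flushcache(addr, bytes, i);
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		flush_destination(addr, copied);	// hypothetical
 */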

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
		size_t maxsize, unsigned maxpages, size_t *start,
		iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		struct page ***pages, size_t maxsize, size_t *start,
		iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

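/*
 * Illustrative sketch (hypothetical locals): wrap a kernel buffer in an
 * ITER_SOURCE kvec iterator and drain it with copy_from_iter():
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_SOURCE, &kv, 1, len);
 *	copied = copy_from_iter(dst, len, &iter);
 */
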
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter at the given limit; note that the second argument is
 * *not* the new size - it is an upper bound on it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends
	 * both operands to u64 here, and any value that would be
	 * truncated by the conversion in the assignment is by definition
	 * greater than all values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; @count is the new total and
 * must not exceed the iterator's size from before the truncation.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

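/*
 * Report what iov_iter_npages() would return if the iterator were
 * capped at max_bytes: truncate, count, then reexpand to the original
 * size.
 */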
static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

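/*
 * Illustrative sketch (hypothetical locals): @csstate points at a
 * struct csum_state carrying the running checksum and its offset:
 *
 *	struct csum_state cs = { .csum = 0, .off = 0 };
 *
 *	csum_and_copy_to_iter(kbuf, len, &cs, iter);
 *	// cs.csum now folds in the bytes just copied
 */
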
static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);

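/*
 * Illustrative readv()-style use (hypothetical consumer, error paths
 * trimmed): import the user vector, consume the iterator, then free
 * whatever import_iovec() may have allocated (kfree(NULL) is a no-op):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV,
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_read(&iter);		// hypothetical
 *	kfree(iov);
 */
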
static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
			void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count
	};
}
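
/*
 * Illustrative sketch (hypothetical locals): wrap a single user buffer
 * as the destination of a read-style operation:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_ubuf(&iter, ITER_DEST, ubuf, len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 */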

/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate, by returning true or false, how
 * (if at all) pages extracted from the iterator will be retained by the
 * extraction function.
 *
 * %true indicates that the pages will have a pin placed in them that
 * the caller must unpin.  This must be done for DMA/async DIO so that
 * fork() is forced to copy a page for the child (the parent must retain
 * the original page).
 *
 * %false indicates that no measures are taken and that it's up to the
 * caller to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}
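
/*
 * Illustrative extraction loop (hypothetical locals, error paths
 * trimmed): release the pins only when the iterator says they were
 * taken:
 *
 *	ssize_t n = iov_iter_extract_pages(iter, &pages, maxsize,
 *					   maxpages, 0, &off);
 *	...
 *	if (iov_iter_extract_will_pin(iter))
 *		for (p = 0; p < npages; p++)
 *			unpin_user_page(pages[p]);
 */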

#endif