/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC = 4,
	ITER_KVEC = 8,
	ITER_BVEC = 16,
	ITER_PIPE = 32,
	ITER_DISCARD = 64,
};

struct iov_iter {
	/*
	 * Bit 0 is the read/write bit, set if we're writing.
	 * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
	 * the caller isn't expecting to drop a page reference when done.
	 */
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}
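
/*
 * Example (illustrative, not part of this header): a consumer can branch on
 * the iterator flavour before deciding how to touch the segments, and use
 * iov_iter_rw() for the data direction; 'i' is a hypothetical caller-supplied
 * iterator.
 *
 *	if (iov_iter_is_kvec(i) || iov_iter_is_bvec(i)) {
 *		... segments are kernel memory, no uaccess needed ...
 *	} else if (iter_is_iovec(i)) {
 *		... segments are userspace pointers ...
 *	}
 *	if (iov_iter_rw(i) == WRITE)
 *		... the iterator is the source of the data ...
 */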

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
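
/*
 * Example (illustrative sketch): the kind of per-segment validation that
 * must happen before iov_length() can be trusted, here capped at
 * MAX_RW_COUNT (from <linux/fs.h>); 'iov' and 'nr_segs' are hypothetical.
 *
 *	size_t total = 0;
 *	unsigned long seg;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *			return -EINVAL;
 *		total += iov[seg].iov_len;
 *	}
 */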

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
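
/*
 * Example (illustrative): walking a user-backed iterator one segment at a
 * time; process_user_range() is a hypothetical per-segment handler that
 * returns how many bytes it consumed.
 *
 *	while (iov_iter_count(iter)) {
 *		struct iovec v = iov_iter_iovec(iter);
 *		ssize_t n = process_user_range(v.iov_base, v.iov_len);
 *
 *		if (n <= 0)
 *			break;
 *		iov_iter_advance(iter, n);
 *	}
 */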

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
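
/*
 * Example (abridged from the generic_perform_write() pattern): fault the
 * user pages in first, copy with page faults disabled, and only advance the
 * iterator by what was actually copied.
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	iov_iter_advance(i, copied);
 */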

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}
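
/*
 * Example (illustrative sketch of a ->read_iter()-style path): copy a kernel
 * buffer out to whatever the iterator describes; 'buf' and 'len' are
 * hypothetical.  A short return means a segment faulted.
 *
 *	size_t copied = copy_to_iter(buf, len, iter);
 *
 *	if (copied != len)
 *		return -EFAULT;
 */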

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on copy_from_iter_flushcache() having
 * stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_mc_to_iter(addr, bytes, i);
}
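
/*
 * Example (illustrative, per the note above): a caller that relies on the
 * flush-on-return guarantee must check the config symbol first, because the
 * fallback silently degrades to the nocache copy.
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;
 *	copied = copy_from_iter_flushcache(kaddr, bytes, i);
 */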

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
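
/*
 * Example (illustrative): wrapping a single kernel buffer in an iterator;
 * READ means data will be copied *into* the buffer.  'buf' and 'len' are
 * hypothetical.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 */
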
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
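
/*
 * Example (illustrative sketch): pinning at most one page of the iterator.
 * A positive return is the number of bytes covered, 'start' the offset into
 * the first page; each page must be released with put_page() when done.
 *
 *	struct page *pages[1];
 *	size_t start;
 *	ssize_t n = iov_iter_get_pages(i, pages, PAGE_SIZE, 1, &start);
 *
 *	if (n > 0) {
 *		... use pages[0] at offset 'start' for 'n' bytes ...
 *		put_page(pages[0]);
 *	}
 */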

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no more than how
 * much we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
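
/*
 * Example (illustrative): capping an iterator for a size-limited operation
 * and restoring the hidden tail afterwards; do_limited_io() is hypothetical
 * and consumes 'done' bytes from the iterator.
 *
 *	size_t old_count = iov_iter_count(iter);
 *
 *	iov_iter_truncate(iter, limit);
 *	done = do_limited_io(iter);
 *	iov_iter_reexpand(iter, old_count - done);
 */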

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
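
/*
 * Example (abridged from the readv()-style callers in fs/read_write.c):
 * import the user's iovec array, do the I/O, then free whatever
 * import_iovec() may have allocated.  kfree(iov) is safe even when the
 * on-stack fast array was used, since *iovp is set to NULL in that case.
 * do_the_io() is hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);
 *	kfree(iov);
 *	return ret;
 */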

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);
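
/*
 * Example (illustrative): the callback sees each range mapped as a
 * kernel-addressable kvec; returning non-zero stops the walk.  sum_range()
 * and 'sum' are hypothetical.
 *
 *	static int sum_range(struct kvec *vec, void *context)
 *	{
 *		u32 *sum = context;
 *		size_t n;
 *
 *		for (n = 0; n < vec->iov_len; n++)
 *			*sum += ((u8 *)vec->iov_base)[n];
 *		return 0;
 *	}
 *
 *	err = iov_iter_for_each_range(i, bytes, sum_range, &sum);
 */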

#endif