/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>
/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * should happen on stores.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to the architecture whether it wants to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
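
/*
 * An illustrative sketch (not a definition from this file; "buf" and "uptr"
 * are hypothetical, and access_ok() is assumed to have succeeded): the
 * caller-side view of the return convention.  If n bytes are requested and
 * N come back as not copied, the first n - N destination bytes are valid
 * and the tail is untouched - no zero-padding at this layer:
 *
 *   char buf[128];
 *   unsigned long left = __copy_from_user(buf, uptr, sizeof(buf));
 *
 *   left == 0: all 128 bytes were copied
 *   left == N: buf[0 .. 128 - N - 1] hold user data; the rest is untouched
 */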

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

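/*
 * An illustrative sketch ("uptr", "hdr" and "body" are hypothetical): the
 * __ variants let a single access_ok() check cover several copies from the
 * same user buffer:
 *
 *   if (!access_ok(uptr, sizeof(*uptr)))
 *     return -EFAULT;
 *   if (__copy_from_user(&hdr, &uptr->hdr, sizeof(hdr)))
 *     return -EFAULT;
 *   if (__copy_from_user(&body, &uptr->body, sizeof(body)))
 *     return -EFAULT;
 */
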
104 
105 #ifdef INLINE_COPY_FROM_USER
106 static inline __must_check unsigned long
107 _copy_from_user(void *to, const void __user *from, unsigned long n)
108 {
109 	unsigned long res = n;
110 	might_fault();
111 	if (likely(access_ok(from, n))) {
112 		instrument_copy_from_user(to, from, n);
113 		res = raw_copy_from_user(to, from, n);
114 	}
115 	if (unlikely(res))
116 		memset(to + (n - res), 0, res);
117 	return res;
118 }
119 #else
120 extern __must_check unsigned long
121 _copy_from_user(void *, const void __user *, unsigned long);
122 #endif
123 
124 #ifdef INLINE_COPY_TO_USER
125 static inline __must_check unsigned long
126 _copy_to_user(void __user *to, const void *from, unsigned long n)
127 {
128 	might_fault();
129 	if (access_ok(to, n)) {
130 		instrument_copy_to_user(to, from, n);
131 		n = raw_copy_to_user(to, from, n);
132 	}
133 	return n;
134 }
135 #else
136 extern __must_check unsigned long
137 _copy_to_user(void __user *, const void *, unsigned long);
138 #endif
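
/*
 * An illustrative sketch: an architecture opting in to inlined plain copies
 * would add something like the following to its asm/uaccess.h:
 *
 *   #define INLINE_COPY_FROM_USER
 *   #define INLINE_COPY_TO_USER
 */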

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
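
/*
 * An illustrative sketch ("karg" and "uarg" are hypothetical): the usual
 * pattern treats any non-zero return as -EFAULT; on the read side the
 * destination has already been zero-padded past the copied bytes:
 *
 *   struct foo karg;
 *
 *   if (copy_from_user(&karg, uarg, sizeof(karg)))
 *     return -EFAULT;
 *   ...
 *   if (copy_to_user(uarg, &karg, sizeof(karg)))
 *     return -EFAULT;
 */
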
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif
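
/*
 * An illustrative sketch ("uto" and "ufrom" are hypothetical): copy_in_user()
 * moves data between two user buffers, e.g. when a compat syscall repacks
 * an argument structure:
 *
 *   if (copy_in_user(uto, ufrom, len))
 *     return -EFAULT;
 */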

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
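
/*
 * An illustrative sketch ("dst", "uptr" and "len" are hypothetical; access_ok()
 * is assumed to have been checked beforehand): the typical pairing of
 * pagefault_disable() with an _inatomic copy variant:
 *
 *   pagefault_disable();
 *   ret = __copy_from_user_inatomic(dst, uptr, len);
 *   pagefault_enable();
 *   if (ret)
 *     ... fall back to a sleeping copy outside the atomic section ...
 */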

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
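
/*
 * An illustrative sketch ("buf", "uptr" and "len" are hypothetical): the
 * *_nofault() helpers disable pagefaults internally and return -EFAULT on
 * any failure, which makes them usable from tracing and similar contexts
 * where taking a fault is not allowed:
 *
 *   long err = copy_from_user_nofault(buf, uptr, len);
 *   if (err)
 *     return err;
 */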

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
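
/*
 * An illustrative sketch ("addr" is hypothetical): read one word from a
 * possibly-unmapped kernel address without risking an oops:
 *
 *   unsigned long insn;
 *
 *   if (get_kernel_nofault(insn, (unsigned long *)addr))
 *     return -EFAULT;
 */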

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
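
/*
 * An illustrative sketch ("uptr", "val" and the "field" member are
 * hypothetical): the canonical unsafe_*() pattern.  Every exit path,
 * including the fault label, must go through user_access_end():
 *
 *   if (!user_access_begin(uptr, sizeof(*uptr)))
 *     return -EFAULT;
 *   unsafe_get_user(val, &uptr->field, Efault);
 *   unsafe_put_user(val + 1, &uptr->field, Efault);
 *   user_access_end();
 *   return 0;
 * Efault:
 *   user_access_end();
 *   return -EFAULT;
 */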

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */