xref: /openbmc/linux/arch/riscv/include/asm/uaccess.h (revision 81de3bf3)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This file was copied from include/asm-generic/uaccess.h
 */

#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

/*
 * User space memory access functions
 */
#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/extable.h>
#include <asm/asm.h>

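/*
 * Supervisor-mode accesses to user pages fault unless the SUM (permit
 * Supervisor User Memory access) bit is set in the sstatus CSR; these
 * helpers toggle that bit around each user access.
 */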
#define __enable_user_access()							\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()							\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define segment_eq(a, b) ((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size) ({					\
	__chk_user_ptr(addr);						\
	likely(__access_ok((unsigned long __force)(addr), (size)));	\
})

/*
 * Ensure that the range [addr, addr+size) is within the process's
 * address space
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	const mm_segment_t fs = get_fs();

	return size <= fs.seg && addr <= fs.seg - size;
}
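
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * callers validate a range once with access_ok() and may then use the
 * unchecked __get_user()/__put_user() variants on it.  The function and
 * variable names below are hypothetical.
 *
 *	static int example_read_pair(u32 __user *uaddr, u32 *a, u32 *b)
 *	{
 *		if (!access_ok(uaddr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		if (__get_user(*a, uaddr) || __get_user(*b, uaddr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */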

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

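/*
 * Word indices of the low and high halves of a 64-bit value split
 * across two 32-bit accesses on (little-endian) RV32.
 */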
#define __LSW	0
#define __MSW	1

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 */

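/*
 * Load one value of the given size from user memory.  On a fault, the
 * fixup code at label 3 sets err to -EFAULT, zeroes the destination,
 * and resumes execution at label 2.
 */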
#define __get_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(x) __x;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %3\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	li %1, 0\n"				\
		"	jump 2b, %2\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
		: "m" (*(ptr)), "i" (-EFAULT));			\
	__disable_user_access();				\
	(x) = __x;						\
} while (0)

#ifdef CONFIG_64BIT
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __get_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	uintptr_t __tmp;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %4\n"				\
		"2:\n"						\
		"	lw %2, %5\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	li %1, 0\n"				\
		"	li %2, 0\n"				\
		"	jump 3b, %3\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
			"=r" (__tmp)				\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
			"i" (-EFAULT));				\
	__disable_user_access();				\
	/*							\
	 * __typeof__((x)-(x)) is x's promoted arithmetic	\
	 * type; the reassembled 64-bit value is converted	\
	 * through it before the final assignment.		\
	 */							\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_64BIT */

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)					\
({								\
	register long __gu_err = 0;				\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
	__chk_user_ptr(__gu_ptr);				\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, __gu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__gu_err;						\
})

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__get_user((x), __p) :				\
		((x) = 0, -EFAULT);				\
})

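/*
 * A minimal usage sketch (hypothetical names): reading a single u32
 * argument from user space in a syscall-like context.
 *
 *	static long example_get_flags(u32 __user *uflags, u32 *out)
 *	{
 *		u32 flags;
 *
 *		if (get_user(flags, uflags))
 *			return -EFAULT;
 *		*out = flags;
 *		return 0;
 *	}
 */
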
/*
 * Store one value of the given size to user memory.  On a fault, the
 * fixup code at label 3 sets err to -EFAULT and resumes at label 2.
 * The "rJ" constraint lets a constant zero be emitted as register x0
 * through the %z operand modifier.
 */
#define __put_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(*(ptr)) __x = x;				\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %z3, %2\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	jump 2b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
		: "rJ" (__x), "i" (-EFAULT));			\
	__disable_user_access();				\
} while (0)

#ifdef CONFIG_64BIT
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __put_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	uintptr_t __tmp;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	sw %z4, %2\n"				\
		"2:\n"						\
		"	sw %z5, %3\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	jump 3b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp),			\
			"=m" (__ptr[__LSW]),			\
			"=m" (__ptr[__MSW])			\
		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
	__disable_user_access();				\
} while (0)
#endif /* CONFIG_64BIT */

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)					\
({								\
	register long __pu_err = 0;				\
	__typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	__chk_user_ptr(__gu_ptr);				\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 8:							\
		__put_user_8((x), __gu_ptr, __pu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__pu_err;						\
})

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__put_user((x), __p) :				\
		-EFAULT;					\
})
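
/*
 * A minimal usage sketch (hypothetical names): writing a status word
 * back to a user-supplied pointer.
 *
 *	static long example_put_status(u32 __user *ustatus, u32 status)
 *	{
 *		return put_user(status, ustatus);
 *	}
 */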

extern unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
extern unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(to, from, n);
}
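
/*
 * These raw helpers return the number of bytes NOT copied and perform
 * no access_ok() check of their own; the generic copy_from_user() and
 * copy_to_user() wrappers in include/linux/uaccess.h add that check.
 * A minimal usage sketch with hypothetical names:
 *
 *	struct example_req req;
 *
 *	if (copy_from_user(&req, urequest, sizeof(req)))
 *		return -EFAULT;
 */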

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strlen_user(const char __user *str);
extern long __must_check strnlen_user(const char __user *str, long n);

extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline
unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return access_ok(to, n) ?
		__clear_user(to, n) : n;
}
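
/*
 * Like the raw copy routines, clear_user() returns the number of bytes
 * that could not be zeroed, so 0 means success.  A minimal sketch with
 * a hypothetical buffer and length:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */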

/*
 * Atomic compare-and-exchange, but with a fixup for userspace faults.  Faults
 * will set "err" to -EFAULT, while successful accesses return the previous
 * value.
 */
#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(err) __err = 0;				\
	register unsigned int __rc;				\
	__enable_user_access();					\
	switch (size) {						\
	case 4:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.w" #scb " %[ret], %[ptr]\n"		\
		"	bne          %[ret], %z[old], 1f\n"	\
		"	sc.w" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez         %[rc], 0b\n"		\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc]  "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	case 8:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.d" #scb " %[ret], %[ptr]\n"		\
		"	bne          %[ret], %z[old], 1f\n"	\
		"	sc.d" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez         %[rc], 0b\n"		\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc]  "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__disable_user_access();				\
	(err) = __err;						\
	__ret;							\
})
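
/*
 * A minimal usage sketch (hypothetical caller).  Note that, as written,
 * the "scb" argument supplies the ordering suffix for the lr instruction
 * and "lrb" the suffix for sc; passing .aqrl for both yields a fully
 * ordered compare-and-exchange:
 *
 *	int err = 0;
 *	u32 prev;
 *
 *	prev = __cmpxchg_user(uaddr, oldval, newval, err,
 *			      sizeof(u32), .aqrl, .aqrl);
 *	if (err)
 *		return err;
 */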

#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */