xref: /openbmc/linux/arch/riscv/include/asm/uaccess.h (revision 8ede5890)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 Regents of the University of California
4  *
5  * This file was copied from include/asm-generic/uaccess.h
6  */
7 
8 #ifndef _ASM_RISCV_UACCESS_H
9 #define _ASM_RISCV_UACCESS_H
10 
11 #include <asm/asm-extable.h>
12 #include <asm/pgtable.h>		/* for TASK_SIZE */
13 
14 /*
15  * User space memory access functions
16  */
17 #ifdef CONFIG_MMU
18 #include <linux/errno.h>
19 #include <linux/compiler.h>
20 #include <linux/thread_info.h>
21 #include <asm/byteorder.h>
22 #include <asm/extable.h>
23 #include <asm/asm.h>
24 
/*
 * Open/close the user-memory access window by setting/clearing the
 * SUM bit in the sstatus CSR.  The "memory" clobber stops the compiler
 * from moving user-memory accesses across the CSR write.
 */
#define __enable_user_access()							\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()							\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
29 
30 /**
31  * access_ok: - Checks if a user space pointer is valid
32  * @addr: User space pointer to start of block to check
33  * @size: Size of block to check
34  *
35  * Context: User context only.  This function may sleep.
36  *
37  * Checks if a pointer to a block of memory in user space is valid.
38  *
39  * Returns true (nonzero) if the memory block may be valid, false (zero)
40  * if it is definitely invalid.
41  *
42  * Note that, depending on architecture, this function probably just
43  * checks that the pointer is in the user space range - after calling
44  * this function, memory access functions may still return -EFAULT.
45  */
#define access_ok(addr, size) ({					\
	__chk_user_ptr(addr);	/* static checker (sparse) __user check */ \
	likely(__access_ok((unsigned long __force)(addr), (size)));	\
})
50 
51 /*
52  * Ensure that the range [addr, addr+size) is within the process's
53  * address space
54  */
55 static inline int __access_ok(unsigned long addr, unsigned long size)
56 {
57 	return size <= TASK_SIZE && addr <= TASK_SIZE - size;
58 }
59 
60 /*
61  * The exception table consists of pairs of addresses: the first is the
62  * address of an instruction that is allowed to fault, and the second is
63  * the address at which the program should continue.  No registers are
64  * modified, so it is entirely up to the continuation code to figure out
65  * what to do.
66  *
67  * All the routines below use bits of fixup code that are out of line
68  * with the main instruction path.  This means when everything is well,
69  * we don't even have to jump over them.  Further, they do not intrude
70  * on our cache or tlb entries.
71  */
72 
/*
 * Index of the least/most significant 32-bit word of a 64-bit value
 * viewed as a u32 pair (see the RV32 __get_user_8/__put_user_8, which
 * combine them as ((u64)__hi << 32) | __lo).
 */
#define __LSW	0
#define __MSW	1
75 
76 /*
77  * The "__xxx" versions of the user access functions do not verify the address
78  * space - it must have been done previously with a separate "access_ok()"
79  * call.
80  */
81 
/*
 * Load one naturally-sized value from user memory with load instruction
 * "insn".  If the access faults, the exception-table fixup resumes at
 * label 2 with -EFAULT in "err" and the destination register zeroed, so
 * (x) is 0 on failure; "err" is left untouched on success.
 */
#define __get_user_asm(insn, x, ptr, err)			\
do {								\
	__typeof__(x) __x;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %2\n"			\
		"2:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)	\
		: "+r" (err), "=&r" (__x)			\
		: "m" (*(ptr)));				\
	(x) = __x;						\
} while (0)
94 
#ifdef CONFIG_64BIT
/* RV64: a 64-bit user load is a single "ld". */
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else /* !CONFIG_64BIT */
/*
 * RV32: assemble a 64-bit value from two 32-bit user loads.  Either
 * load may fault; both fixups record the error and zero the low word,
 * and the high word is cleared by hand afterwards so a failed access
 * always yields (x) == 0, matching the 64-bit behaviour.  The
 * (__typeof__((x)-(x))) cast picks x's promoted arithmetic type
 * before the final conversion.
 */
#define __get_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %3\n"				\
		"2:\n"						\
		"	lw %2, %4\n"				\
		"3:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1)	\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1)	\
		: "+r" (err), "=&r" (__lo), "=r" (__hi)		\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]));	\
	if (err)						\
		__hi = 0;					\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_64BIT */
119 
/*
 * Dispatch a user load on the operand size; any size other than
 * 1/2/4/8 bytes fails the build via BUILD_BUG().
 */
#define __get_user_nocheck(x, __gu_ptr, __gu_err)		\
do {								\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, __gu_err);	\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)
139 
140 /**
141  * __get_user: - Get a simple variable from user space, with less checking.
142  * @x:   Variable to store result.
143  * @ptr: Source address, in user space.
144  *
145  * Context: User context only.  This function may sleep.
146  *
147  * This macro copies a single simple variable from user space to kernel
148  * space.  It supports simple types like char and int, but not larger
149  * data types like structures or arrays.
150  *
151  * @ptr must have pointer-to-simple-variable type, and the result of
152  * dereferencing @ptr must be assignable to @x without a cast.
153  *
154  * Caller must check the pointer with access_ok() before calling this
155  * function.
156  *
157  * Returns zero on success, or -EFAULT on error.
158  * On error, the variable @x is set to zero.
159  */
#define __get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
	long __gu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	/* Keep the user-access window open only around the load. */ \
	__enable_user_access();					\
	__get_user_nocheck(x, __gu_ptr, __gu_err);		\
	__disable_user_access();				\
								\
	__gu_err;						\
})
173 
174 /**
175  * get_user: - Get a simple variable from user space.
176  * @x:   Variable to store result.
177  * @ptr: Source address, in user space.
178  *
179  * Context: User context only.  This function may sleep.
180  *
181  * This macro copies a single simple variable from user space to kernel
182  * space.  It supports simple types like char and int, but not larger
183  * data types like structures or arrays.
184  *
185  * @ptr must have pointer-to-simple-variable type, and the result of
186  * dereferencing @ptr must be assignable to @x without a cast.
187  *
188  * Returns zero on success, or -EFAULT on error.
189  * On error, the variable @x is set to zero.
190  */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	/* Mirror the fault path on a bad pointer: x = 0, -EFAULT. */ \
	access_ok(__p, sizeof(*__p)) ?		\
		__get_user((x), __p) :				\
		((x) = 0, -EFAULT);				\
})
199 
/*
 * Store one naturally-sized value to user memory with store instruction
 * "insn".  The "rJ" constraint together with the %z output modifier
 * lets a constant zero use register x0 instead of a scratch register.
 * A faulting store resumes at label 2 with -EFAULT recorded in "err";
 * "err" is left untouched on success.
 */
#define __put_user_asm(insn, x, ptr, err)			\
do {								\
	__typeof__(*(ptr)) __x = x;				\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %z2, %1\n"			\
		"2:\n"						\
		_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0)		\
		: "+r" (err), "=m" (*(ptr))			\
		: "rJ" (__x));					\
} while (0)
211 
#ifdef CONFIG_64BIT
/* RV64: a 64-bit user store is a single "sd". */
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else /* !CONFIG_64BIT */
/*
 * RV32: store a 64-bit value as two 32-bit stores, low word first.
 * Either store may fault; both fixups report through the shared "err"
 * slot.  The (__typeof__((x)-(x))) cast promotes x to its arithmetic
 * type before widening to u64.
 */
#define __put_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	sw %z3, %1\n"				\
		"2:\n"						\
		"	sw %z4, %2\n"				\
		"3:\n"						\
		_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0)		\
		_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0)		\
		: "+r" (err),					\
			"=m" (__ptr[__LSW]),			\
			"=m" (__ptr[__MSW])			\
		: "rJ" (__x), "rJ" (__x >> 32));		\
} while (0)
#endif /* CONFIG_64BIT */
234 
/*
 * Dispatch a user store on the operand size; any size other than
 * 1/2/4/8 bytes fails the build via BUILD_BUG().
 *
 * The pointer parameter is named __pu_ptr (not __gu_ptr as the get_user
 * path uses) so the put/get helpers read consistently and cannot be
 * confused with __get_user_nocheck()'s parameters.
 */
#define __put_user_nocheck(x, __pu_ptr, __pu_err)		\
do {								\
	switch (sizeof(*__pu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __pu_ptr, __pu_err);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __pu_ptr, __pu_err);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __pu_ptr, __pu_err);	\
		break;						\
	case 8:							\
		__put_user_8((x), __pu_ptr, __pu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)
254 
255 /**
256  * __put_user: - Write a simple value into user space, with less checking.
257  * @x:   Value to copy to user space.
258  * @ptr: Destination address, in user space.
259  *
260  * Context: User context only.  This function may sleep.
261  *
262  * This macro copies a single simple value from kernel space to user
263  * space.  It supports simple types like char and int, but not larger
264  * data types like structures or arrays.
265  *
266  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
267  * to the result of dereferencing @ptr. The value of @x is copied to avoid
268  * re-ordering where @x is evaluated inside the block that enables user-space
269  * access (thus bypassing user space protection if @x is a function).
270  *
271  * Caller must check the pointer with access_ok() before calling this
272  * function.
273  *
274  * Returns zero on success, or -EFAULT on error.
275  */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__pu_ptr = (ptr);		\
	/* Evaluate @x now, before user access is enabled (see above). */ \
	__typeof__(*__pu_ptr) __val = (x);			\
	long __pu_err = 0;					\
								\
	__chk_user_ptr(__pu_ptr);				\
								\
	__enable_user_access();					\
	__put_user_nocheck(__val, __pu_ptr, __pu_err);		\
	__disable_user_access();				\
								\
	__pu_err;						\
})
290 
291 /**
292  * put_user: - Write a simple value into user space.
293  * @x:   Value to copy to user space.
294  * @ptr: Destination address, in user space.
295  *
296  * Context: User context only.  This function may sleep.
297  *
298  * This macro copies a single simple value from kernel space to user
299  * space.  It supports simple types like char and int, but not larger
300  * data types like structures or arrays.
301  *
302  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
303  * to the result of dereferencing @ptr.
304  *
305  * Returns zero on success, or -EFAULT on error.
306  */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();						\
	/* Unlike get_user(), there is nothing to zero on failure. */ \
	access_ok(__p, sizeof(*__p)) ?		\
		__put_user((x), __p) :				\
		-EFAULT;					\
})
315 
316 
317 unsigned long __must_check __asm_copy_to_user(void __user *to,
318 	const void *from, unsigned long n);
319 unsigned long __must_check __asm_copy_from_user(void *to,
320 	const void __user *from, unsigned long n);
321 
322 static inline unsigned long
323 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
324 {
325 	return __asm_copy_from_user(to, from, n);
326 }
327 
328 static inline unsigned long
329 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
330 {
331 	return __asm_copy_to_user(to, from, n);
332 }
333 
334 extern long strncpy_from_user(char *dest, const char __user *src, long count);
335 
336 extern long __must_check strnlen_user(const char __user *str, long n);
337 
338 extern
339 unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
340 
341 static inline
342 unsigned long __must_check clear_user(void __user *to, unsigned long n)
343 {
344 	might_fault();
345 	return access_ok(to, n) ?
346 		__clear_user(to, n) : n;
347 }
348 
349 #define HAVE_GET_KERNEL_NOFAULT
350 
/*
 * Read a value from a kernel address that is allowed to fault.
 *
 * __kr_err must start at zero: the exception-table fixup only writes
 * the error slot when a fault actually occurs (see __get_user_asm),
 * so an uninitialized value could spuriously branch to err_label.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err = 0;						\
									\
	__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
359 
/*
 * Write a value to a kernel address that is allowed to fault.
 *
 * __kr_err must start at zero: the exception-table fixup only writes
 * the error slot when a fault actually occurs (see __put_user_asm),
 * so an uninitialized value could spuriously branch to err_label.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err = 0;						\
									\
	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
368 
369 #else /* CONFIG_MMU */
370 #include <asm-generic/uaccess.h>
371 #endif /* CONFIG_MMU */
372 #endif /* _ASM_RISCV_UACCESS_H */
373