/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic access functions to user memory space
 * for the kernel, such as get_user()/put_user() and the
 * copy_{to,from}_user() backends.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/prefetch.h>
#include <asm/types.h>
#include <asm/extable.h>
#include <asm-generic/access_ok.h>

/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))

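/*
 * Minimal usage sketch (not from this file; "arg" is a hypothetical
 * user-supplied pointer): both macros return 0 on success and -EFAULT
 * when the user address is invalid (or, on xtensa, unaligned).
 *
 *	int __user *uptr = (int __user *)arg;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */
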
/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
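
/*
 * Intended pattern (a sketch; "uarg" and "struct foo" are
 * hypothetical): validate the whole user area once, then use the
 * unchecked accessors for the individual fields.
 *
 *	struct foo __user *uarg = ...;
 *	u32 a, b;
 *
 *	if (!access_ok(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(a, &uarg->a) || __get_user(b, &uarg->b))
 *		return -EFAULT;
 */
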
extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (access_ok(__pu_addr, size))					\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb);  break;	\
	case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break;	\
	case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break;	\
	case 8: {							\
		     __typeof__(*ptr) __v64 = x;			\
		     retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0;	\
		     break;						\
	        }							\
	default: __put_user_bad();					\
	}								\
} while (0)
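
/*
 * Illustration (a sketch of the expansion, variable names are
 * hypothetical): for
 *
 *	u16 __user *p;
 *	put_user((u16)0x1234, p);
 *
 * sizeof(*(ptr)) is 2, so __put_user_size() picks the "s16i" form of
 * __put_user_asm(); a 64-bit value has no single store instruction on
 * this 32-bit core and falls back to __copy_to_user() above.
 */
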
/*
 * Consider the case where a single user load/store causes both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, and 2f and the named operands
 * %[mem], %[err], and %[efault] must stay in sync.
 */
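
/*
 * Visible effect (sketch; "ubuf" is a hypothetical user address): an
 * int access through a misaligned user pointer fails cleanly with
 * -EFAULT instead of taking an unaligned exception in the kernel.
 *
 *	int val;
 *	int __user *p = (int __user *)(ubuf + 1);
 *
 *	get_user(val, p) then returns -EFAULT because p is not 4-byte
 *	aligned.
 */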

#define __check_align_1  ""

#define __check_align_2				\
	"   _bbci.l %[mem] * 0, 1f	\n"	\
	"   movi    %[err], %[efault]	\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %[mem] * 0, 0f	\n"	\
	"   _bbci.l %[mem] * 0 + 1, 1f	\n"	\
	"0: movi    %[err], %[efault]	\n"	\
	"   _j      2f			\n"

/*
 * The [mem] "=m" output operand tells gcc exactly which user location
 * is written, so there are no aliasing issues with any other memory
 * gcc knows about.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
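
/*
 * How the fixup below works (explanatory note): the user access at
 * label 1 gets an __ex_table entry pointing at label 5 in .fixup.  If
 * the access faults, the exception handler resumes at 5, which sets
 * err to -EFAULT and jumps back to label 2, just past the faulting
 * instruction.  The __check_align_* fragments branch to label 1 when
 * the address is sufficiently aligned and otherwise set -EFAULT and
 * jump straight to label 2.
 */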
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %[x], %[mem]	\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"   .literal_position		\n"		\
	"5:				\n"		\
	"   movi   %[tmp], 2b		\n"		\
	"   movi   %[err], %[efault]	\n"		\
	"   jx     %[tmp]		\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long	1b, 5b		\n"		\
	"   .previous"					\
	:[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_))		\
	:[x] "r"(x_), [efault] "i"(-EFAULT))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	__get_user_size((x), (ptr), (size), __gu_err);		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (access_ok(__gu_addr, size))					\
		__get_user_size((x), __gu_addr, (size), __gu_err);	\
	else								\
		(x) = (__typeof__(*(ptr)))0;				\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
	case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
	case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
	case 8: {							\
		u64 __x;						\
		if (unlikely(__copy_from_user(&__x, ptr, 8))) {		\
			retval = -EFAULT;				\
			(x) = (__typeof__(*(ptr)))0;			\
		} else {						\
			(x) = *(__force __typeof__(*(ptr)) *)&__x;	\
		}							\
		break;							\
	}								\
	default:							\
		(x) = (__typeof__(*(ptr)))0;				\
		__get_user_bad();					\
	}								\
} while (0)
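
/*
 * Note (illustrative, hypothetical names): a 64-bit read such as
 *
 *	u64 v;
 *	err = get_user(v, (u64 __user *)uptr);
 *
 * is wider than a single 32-bit load, so the size-8 case above goes
 * through __copy_from_user() and zeroes v if the copy faults.
 */
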
/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
do {							\
	u32 __x = 0;					\
	__asm__ __volatile__(				\
		__check_align_##align			\
		"1: "insn"  %[x], %[mem]	\n"	\
		"2:				\n"	\
		"   .section  .fixup,\"ax\"	\n"	\
		"   .align 4			\n"	\
		"   .literal_position		\n"	\
		"5:				\n"	\
		"   movi   %[tmp], 2b		\n"	\
		"   movi   %[err], %[efault]	\n"	\
		"   jx     %[tmp]		\n"	\
		"   .previous			\n"	\
		"   .section  __ex_table,\"a\"	\n"	\
		"   .long	1b, 5b		\n"	\
		"   .previous"				\
		:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
		:[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \
	(x_) = (__force __typeof__(*(addr_)))__x;	\
} while (0)

/*
 * Copy to/from user space
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);
	return __xtensa_copy_user(to, (__force const void *)from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	prefetch(from);
	return __xtensa_copy_user((__force void *)to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
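
/*
 * These are the arch backends for copy_from_user()/copy_to_user();
 * callers normally use the generic wrappers (sketch, hypothetical
 * buffer names):
 *
 *	char kbuf[64];
 *
 *	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * Like copy_from_user() itself, the raw_* routines return the number
 * of bytes that could not be copied, which the caller above maps to
 * -EFAULT.
 */
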
/*
 * We need to return the number of bytes not cleared.  Our __memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event we report that no memory was cleared by returning the
 * full size; otherwise we return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void __user *addr, unsigned long size)
{
	if (!__memset((void __force *)addr, 0, size))
		return size;
	return 0;
}

static inline unsigned long
clear_user(void __user *addr, unsigned long size)
{
	if (access_ok(addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}

#define __clear_user  __xtensa_clear_user
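
/*
 * Usage sketch (hypothetical pointer/length): clearing a user buffer
 * and treating any nonzero return as a fault.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *
 * A nonzero return is the number of bytes left uncleared (or -EFAULT
 * when access_ok() fails), so callers usually just test for nonzero.
 */
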
#ifdef CONFIG_ARCH_HAS_STRNCPY_FROM_USER
extern long __strncpy_user(char *dst, const char __user *src, long count);

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (access_ok(src, 1))
		return __strncpy_user(dst, src, count);
	return -EFAULT;
}
#else
long strncpy_from_user(char *dst, const char __user *src, long count);
#endif
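
/*
 * Usage sketch (hypothetical names): copy a NUL-terminated string from
 * user space into a fixed-size kernel buffer.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 *
 * On success the return value is the length of the copied string, not
 * counting the terminating NUL; a return equal to the buffer size
 * means the string was truncated.
 */
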
/*
 * Return the size of a string (including the terminating NUL!)
 */
extern long __strnlen_user(const char __user *str, long len);

static inline long strnlen_user(const char __user *str, long len)
{
	if (!access_ok(str, 1))
		return 0;
	return __strnlen_user(str, len);
}
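
/*
 * Usage sketch (hypothetical names): a zero return means the pointer
 * failed access_ok() or faulted; otherwise the count includes the
 * terminating NUL.
 *
 *	long n = strnlen_user(ustr, PATH_MAX);
 *
 *	if (n == 0)
 *		return -EFAULT;
 */
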
#endif	/* _XTENSA_UACCESS_H */