xref: /openbmc/linux/arch/xtensa/include/asm/uaccess.h (revision 10503bf9435f304b7a74ebfdb8571dec001d751c)
/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic access functions to the user memory
 * space for the kernel, such as get_user()/put_user() and the copy,
 * clear, and string helpers defined below.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/prefetch.h>
#include <asm/types.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not.  If get_fs() == USER_DS, checking is
 * performed; with get_fs() == KERNEL_DS, checking is bypassed (see the
 * usage sketch below the definitions).
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	(current->thread.current_ds = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

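/*
 * Usage sketch (illustration only, not part of this header; the helper
 * call in the middle is hypothetical): the classic pattern for
 * temporarily lifting the user-space limit so that a uaccess routine
 * may be handed a kernel pointer.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);		bypasses the __user_ok() check
 *	err = do_uaccess_on_kernel_buffer();
 *	set_fs(old_fs);			always restore the previous limit
 */
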
#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
		(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE - (size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))

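/*
 * Example (a minimal sketch; ubuf and len are hypothetical): access_ok()
 * succeeds only when the whole range [addr, addr + size) lies below
 * TASK_SIZE, or when the current segment is KERNEL_DS.  A successful
 * check does not guarantee the pages are mapped; the access itself may
 * still fault and be fixed up.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 */
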
/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is OK)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))

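/*
 * Usage sketch (uptr and val are hypothetical): both macros evaluate to
 * 0 on success and to -EFAULT on a bad or misaligned user address;
 * get_user() additionally writes the fetched value into its first
 * argument.
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))	fetch *uptr into val
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))	store val + 1 back through uptr
 *		return -EFAULT;
 */
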
/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to the
 * same area of user memory).
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
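
/*
 * Sketch of the intended pattern (the user structure and fields are
 * hypothetical): one access_ok() check followed by several unchecked
 * accesses to the same user area.
 *
 *	if (!access_ok(VERIFY_READ, uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	err  = __get_user(a, &uarg->a);
 *	err |= __get_user(b, &uarg->b);
 */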


extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb);  break;	\
	case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break;	\
	case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break;	\
	case 8: {							\
		     __typeof__(*(ptr)) __v64 = x;			\
		     retval = __copy_to_user(ptr, &__v64, 8);		\
		     break;						\
	        }							\
	default: __put_user_bad();					\
	}								\
} while (0)


/*
 * Consider a case where a single user load/store would cause both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
 * sync.
 */
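
/*
 * For example (unaligned_uptr is hypothetical), a call such as
 * get_user(v, (int __user *)unaligned_uptr) with either of the two low
 * address bits set fails the __check_align_4 test and returns -EFAULT
 * instead of taking the unaligned exception described above.
 */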

#define __check_align_1  ""

#define __check_align_2				\
	"   _bbci.l %3,  0, 1f		\n"	\
	"   movi    %0, %4		\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %3,  0, 0f		\n"	\
	"   _bbci.l %3,  1, 1f		\n"	\
	"0: movi    %0, %4		\n"	\
	"   _j      2f			\n"


/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x, addr, err, align, insn, cb)	\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %2, %3, 0		\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"4:				\n"		\
	"   .long  2b			\n"		\
	"5:				\n"		\
	"   l32r   %1, 4b		\n"		\
	"   movi   %0, %4		\n"		\
	"   jx     %1			\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long	1b, 5b		\n"		\
	"   .previous"					\
	:"=r" (err), "=r" (cb)				\
	:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
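
/*
 * Operand map for __put_user_asm (kept in sync with __check_align_*):
 * %0 = err (in/out), %1 = cb scratch register, %2 = value to store,
 * %3 = user address, %4 = the -EFAULT constant.
 */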

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ, __gu_addr, size))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
	case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
	case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
	case 8: retval = __copy_from_user(&x, ptr, 8);    break;	\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)


/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x, addr, err, align, insn, cb) \
__asm__ __volatile__(			\
	__check_align_##align			\
	"1: "insn"  %2, %3, 0		\n"	\
	"2:				\n"	\
	"   .section  .fixup,\"ax\"	\n"	\
	"   .align 4			\n"	\
	"4:				\n"	\
	"   .long  2b			\n"	\
	"5:				\n"	\
	"   l32r   %1, 4b		\n"	\
	"   movi   %2, 0		\n"	\
	"   movi   %0, %4		\n"	\
	"   jx     %1			\n"	\
	"   .previous			\n"	\
	"   .section  __ex_table,\"a\"	\n"	\
	"   .long	1b, 5b		\n"	\
	"   .previous"				\
	:"=r" (err), "=r" (cb), "=r" (x)	\
	:"r" (addr), "i" (-EFAULT), "0" (err))
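
/*
 * Operand map for __get_user_asm: %0 = err (in/out), %1 = cb scratch
 * register, %2 = destination of the load (forced to 0 on a fault),
 * %3 = user address, %4 = the -EFAULT constant.
 */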


/*
 * Copy to/from user space
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);
	return __xtensa_copy_user(to, (__force const void *)from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	prefetch(from);
	return __xtensa_copy_user((__force void *)to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
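
/*
 * Sketch (kbuf, ubuf and len are hypothetical): the generic
 * copy_{from,to}_user() wrappers built on these raw routines return
 * the number of bytes that could not be copied, so zero means the
 * whole buffer was transferred.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */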

/*
 * We need to return the number of bytes not cleared.  Our memset()
 * returns zero if a fault occurs while accessing user-space memory.
 * In that event we report that no memory was cleared by returning the
 * full size.  Otherwise, zero is returned for success.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
	if (!memset(addr, 0, size))
		return size;
	return 0;
}

static inline unsigned long
clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}

#define __clear_user  __xtensa_clear_user
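
/*
 * Sketch (ubuf and len are hypothetical): a nonzero return from
 * clear_user() means the range could not be fully cleared.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */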


extern long __strncpy_user(char *, const char *, long);

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_user(dst, src, count);
	return -EFAULT;
}
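
/*
 * Sketch (uname is a hypothetical user pointer): -EFAULT is returned
 * when even the first byte of the source is not an accessible user
 * address; callers treat any negative return as an error.
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, uname, sizeof(name));
 *	if (n < 0)
 *		return n;
 */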

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *, long);

static inline long strnlen_user(const char *str, long len)
{
	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len);
}
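
/*
 * Sketch (ustr is a hypothetical user pointer): a return of 0 means the
 * string does not start at a valid user address; otherwise the result
 * counts the terminating 0, as noted above.
 *
 *	long n = strnlen_user(ustr, PATH_MAX);
 *	if (n == 0)
 *		return -EFAULT;
 */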

#endif	/* _XTENSA_UACCESS_H */