/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic access functions to user memory space
 * for the kernel, such as get_user()/put_user() and the copy and clear
 * helpers defined below.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/prefetch.h>
#include <asm/types.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not.  If get_fs() == USER_DS, checking is
 * performed; with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	(current->thread.current_ds = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
		(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE - (size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size))

#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)

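/*
 * Note that __user_ok() is written to avoid arithmetic overflow: the
 * entire range [addr, addr + size) must fit below TASK_SIZE.  A worked
 * example with illustrative values only: if TASK_SIZE were 0x40000000,
 * then __user_ok(0x3ffffffc, 8) is false because 0x3ffffffc is greater
 * than TASK_SIZE - 8 == 0x3ffffff8, even though addr itself is in range.
 */
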
/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))
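
/*
 * Usage sketch (illustrative only; the function below is hypothetical,
 * not part of the kernel): get_user()/put_user() return 0 or -EFAULT
 * and deliver the loaded value through their first argument:
 *
 *	static int hypothetical_bump(int __user *uptr)
 *	{
 *		int v;
 *
 *		if (get_user(v, uptr))
 *			return -EFAULT;
 *		return put_user(v + 1, uptr);
 *	}
 */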

/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
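
/*
 * Usage sketch for the unchecked variants (illustrative only; the
 * function below is hypothetical): validate the whole range once with
 * access_ok(), then use __get_user() for the individual accesses:
 *
 *	static int hypothetical_read_pair(u32 __user *up, u32 *a, u32 *b)
 *	{
 *		if (!access_ok(up, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		if (__get_user(*a, up) || __get_user(*b, up + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */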

extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
	if (access_ok(__pu_addr, size))					\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb);  break;	\
	case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break;	\
	case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break;	\
	case 8: {							\
		__typeof__(*ptr) __v64 = x;				\
		retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0;	\
		break;							\
	}								\
	default: __put_user_bad();					\
	}								\
} while (0)
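
/*
 * The dispatch above is driven by sizeof(*(ptr)): for example,
 * put_user(v, (u16 __user *)p) selects case 2 and stores with "s16i",
 * while a 64-bit value takes the case-8 path through __copy_to_user()
 * rather than a single store instruction.
 */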

/*
 * Consider the case where a single user load/store would cause both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so the
 * macro-local assembly labels (0f, 1f, 2f) and the named operands
 * (%[addr], %[err], %[efault]) must stay in sync between them.
 */

#define __check_align_1  ""

#define __check_align_2				\
	"   _bbci.l %[addr], 0, 1f	\n"	\
	"   movi    %[err], %[efault]	\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %[addr], 0, 0f	\n"	\
	"   _bbci.l %[addr], 1, 1f	\n"	\
	"0: movi    %[err], %[efault]	\n"	\
	"   _j      2f			\n"

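/*
 * Expressed as C predicates (illustrative equivalents only):
 * __check_align_2 rejects an address with bit 0 set, and
 * __check_align_4 rejects one with either of the two low bits set:
 *
 *	ok2 = ((addr & 1) == 0);
 *	ok4 = ((addr & 3) == 0);
 */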

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %[x], %[addr], 0	\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"   .literal_position		\n"		\
	"5:				\n"		\
	"   movi   %[tmp], 2b		\n"		\
	"   movi   %[err], %[efault]	\n"		\
	"   jx     %[tmp]		\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long	1b, 5b		\n"		\
	"   .previous"					\
	:[err] "+r"(err_), [tmp] "=r"(cb)		\
	:[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
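
/*
 * For reference, a sketch (hand-derived from the macro above, not
 * compiler output) of __put_user_asm(x, p, err, 4, "s32i", cb): it
 * emits the __check_align_4 test on %[addr], then
 *
 *	1: s32i  %[x], %[addr], 0
 *	2:
 *
 * plus a .fixup stub at local label 5 that sets %[err] to -EFAULT and
 * jumps back to 2, and an __ex_table entry mapping 1b to 5b.
 */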

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(__gu_addr, size))					\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
	case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
	case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
	case 8: {							\
		u64 __x;						\
		if (unlikely(__copy_from_user(&__x, ptr, 8))) {		\
			retval = -EFAULT;				\
			(x) = 0;					\
		} else {						\
			(x) = *(__force __typeof__((ptr)))&__x;		\
		}							\
		break;							\
	}								\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)
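
/*
 * As on the store side, 1-, 2- and 4-byte user loads are a single
 * instruction, while 64-bit loads fall back to __copy_from_user().
 * Callers see the same interface either way (illustrative):
 *
 *	u64 v;
 *	int err = get_user(v, (u64 __user *)uptr);
 */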

/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
__asm__ __volatile__(				\
	__check_align_##align			\
	"1: "insn"  %[x], %[addr], 0	\n"	\
	"2:				\n"	\
	"   .section  .fixup,\"ax\"	\n"	\
	"   .align 4			\n"	\
	"   .literal_position		\n"	\
	"5:				\n"	\
	"   movi   %[tmp], 2b		\n"	\
	"   movi   %[x], 0		\n"	\
	"   movi   %[err], %[efault]	\n"	\
	"   jx     %[tmp]		\n"	\
	"   .previous			\n"	\
	"   .section  __ex_table,\"a\"	\n"	\
	"   .long	1b, 5b		\n"	\
	"   .previous"				\
	:[err] "+r"(err_), [tmp] "=r"(cb), [x] "=r"(x_)\
	:[addr] "r"(addr_), [efault] "i"(-EFAULT))

/*
 * Copy to/from user space
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);
	return __xtensa_copy_user(to, (__force const void *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	prefetch(from);
	return __xtensa_copy_user((__force void *)to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

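/*
 * With INLINE_COPY_{FROM,TO}_USER defined, linux/uaccess.h builds
 * copy_from_user()/copy_to_user() on top of these raw helpers; both
 * return the number of bytes NOT copied.  A typical caller
 * (illustrative; struct and variable names are hypothetical):
 *
 *	struct hypothetical_req req;
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 */
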
/*
 * We need to return the number of bytes not cleared.  Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, we report that none of the memory was cleared by
 * returning the full size.  Otherwise, we return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
	if (!__memset(addr, 0, size))
		return size;
	return 0;
}

static inline unsigned long
clear_user(void *addr, unsigned long size)
{
	if (access_ok(addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}

#define __clear_user  __xtensa_clear_user

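/*
 * Usage sketch (illustrative): a nonzero return from clear_user()
 * means some user memory could not be zeroed:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
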
#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER

extern long __strncpy_user(char *, const char *, long);

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	if (access_ok(src, 1))
		return __strncpy_user(dst, src, count);
	return -EFAULT;
}
#else
long strncpy_from_user(char *dst, const char *src, long count);
#endif
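
/*
 * Usage sketch (illustrative): on success strncpy_from_user() returns
 * the length of the copied string, excluding the trailing NUL:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */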

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *, long);

static inline long strnlen_user(const char *str, long len)
{
	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len);
}
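
/*
 * Usage sketch (illustrative): strnlen_user() counts the terminating
 * NUL and returns 0 if the string is not accessible:
 *
 *	long n = strnlen_user(ustr, PATH_MAX);
 *
 *	if (n == 0)
 *		return -EFAULT;
 */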

#endif	/* _XTENSA_UACCESS_H */