/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic access functions to the user memory
 * space for the kernel.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/prefetch.h>
#include <asm/types.h>
#include <asm/extable.h>
#include <asm-generic/access_ok.h>

/*
 * These are the main single-value transfer routines. They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly. We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact. Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))
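
/*
 * Illustrative sketch (hypothetical handler, not defined in this
 * header): both macros return 0 on success and -EFAULT on failure, so
 * a typical caller looks like:
 *
 *	static long example_ioctl(unsigned long arg)
 *	{
 *		int __user *uptr = (int __user *)arg;
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		val *= 2;
 *		if (put_user(val, uptr))
 *			return -EFAULT;
 *		return 0;
 *	}
 */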

/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x, ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
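
/*
 * Illustrative sketch (hypothetical struct and helper, assumptions
 * ours): one access_ok() check covering several __get_user() calls to
 * the same user structure:
 *
 *	struct pair { int a; int b; };
 *
 *	static int read_pair(struct pair __user *up, int *a, int *b)
 *	{
 *		if (!access_ok(up, sizeof(*up)))
 *			return -EFAULT;
 *		if (__get_user(*a, &up->a) || __get_user(*b, &up->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */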


extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (access_ok(__pu_addr, size))					\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break;	\
	case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break;	\
	case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break;	\
	case 8: {							\
		__typeof__(*(ptr)) __v64 = x;				\
		retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0;	\
		break;							\
	}								\
	default: __put_user_bad();					\
	}								\
} while (0)
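
/*
 * Illustrative note (sketch, assumptions ours): the size dispatch above
 * means put_user() emits a different store per pointee size, e.g.:
 *
 *	u8 __user *p8;		put_user(0xffU, p8);	// "s8i" case
 *	u32 __user *p32;	put_user(val, p32);	// "s32i" case
 *	u64 __user *p64;	put_user(v64, p64);	// __copy_to_user() case
 */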


/*
 * Consider the case where a single user load/store causes both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses. We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel. Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
 * sync.
 */
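
/*
 * Illustrative consequence (sketch, assumptions ours): a misaligned
 * user pointer fails fast with -EFAULT instead of taking the unaligned
 * exception path described above:
 *
 *	u32 __user *p = (u32 __user *)(uaddr + 1);	// misaligned
 *	u32 v;
 *
 *	if (__get_user(v, p))	// __check_align_4 sets -EFAULT
 *		return -EFAULT;
 */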

#define __check_align_1  ""

#define __check_align_2				\
	"   _bbci.l %[mem] * 0, 1f	\n"	\
	"   movi    %[err], %[efault]	\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %[mem] * 0, 0f	\n"	\
	"   _bbci.l %[mem] * 0 + 1, 1f	\n"	\
	"0: movi    %[err], %[efault]	\n"	\
	"   _j      2f			\n"

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %[x], %[mem]	\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"   .literal_position		\n"		\
	"5:				\n"		\
	"   movi   %[tmp], 2b		\n"		\
	"   movi   %[err], %[efault]	\n"		\
	"   jx     %[tmp]		\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long	1b, 5b		\n"		\
	"   .previous"					\
	:[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_))	\
	:[x] "r"(x_), [efault] "i"(-EFAULT))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	__get_user_size((x), (ptr), (size), __gu_err);		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (access_ok(__gu_addr, size))					\
		__get_user_size((x), __gu_addr, (size), __gu_err);	\
	else								\
		(x) = (__typeof__(*(ptr)))0;				\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
	case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
	case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
	case 8: {							\
		u64 __x;						\
		if (unlikely(__copy_from_user(&__x, ptr, 8))) {		\
			retval = -EFAULT;				\
			(x) = (__typeof__(*(ptr)))0;			\
		} else {						\
			(x) = *(__force __typeof__(*(ptr)) *)&__x;	\
		}							\
		break;							\
	}								\
	default:							\
		(x) = (__typeof__(*(ptr)))0;				\
		__get_user_bad();					\
	}								\
} while (0)
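
/*
 * Illustrative note (sketch, assumptions ours): the 64-bit case above
 * goes through __copy_from_user() rather than a single load:
 *
 *	u64 v;
 *
 *	if (get_user(v, (u64 __user *)uptr))	// "case 8" path
 *		return -EFAULT;
 */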


/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
do {							\
	u32 __x = 0;					\
	__asm__ __volatile__(				\
		__check_align_##align			\
		"1: "insn"  %[x], %[mem]	\n"	\
		"2:				\n"	\
		"   .section  .fixup,\"ax\"	\n"	\
		"   .align 4			\n"	\
		"   .literal_position		\n"	\
		"5:				\n"	\
		"   movi   %[tmp], 2b		\n"	\
		"   movi   %[err], %[efault]	\n"	\
		"   jx     %[tmp]		\n"	\
		"   .previous			\n"	\
		"   .section  __ex_table,\"a\"	\n"	\
		"   .long	1b, 5b		\n"	\
		"   .previous"				\
		:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x)	\
		:[mem] "m"(*(addr_)), [efault] "i"(-EFAULT));		\
	(x_) = (__force __typeof__(*(addr_)))__x;	\
} while (0)


/*
 * Copy to/from user space
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);
	return __xtensa_copy_user(to, (__force const void *)from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	prefetch(from);
	return __xtensa_copy_user((__force void *)to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
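
/*
 * Illustrative sketch (hypothetical caller, assumptions ours): the
 * generic copy_{from,to}_user() helpers built on the raw copies above
 * return the number of bytes left uncopied, so 0 means success:
 *
 *	struct req r;
 *
 *	if (copy_from_user(&r, ubuf, sizeof(r)))
 *		return -EFAULT;
 */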

/*
 * We need to return the number of bytes not cleared. Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, report that no memory was cleared (i.e. return the
 * full size). Otherwise, return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void __user *addr, unsigned long size)
{
	if (!__memset((void __force *)addr, 0, size))
		return size;
	return 0;
}

static inline unsigned long
clear_user(void __user *addr, unsigned long size)
{
	if (access_ok(addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}
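
/*
 * Illustrative sketch (hypothetical buffer, assumptions ours): a
 * nonzero return from clear_user() means some bytes were left
 * uncleared:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */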

#define __clear_user  __xtensa_clear_user


#ifdef CONFIG_ARCH_HAS_STRNCPY_FROM_USER
extern long __strncpy_user(char *dst, const char __user *src, long count);

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (access_ok(src, 1))
		return __strncpy_user(dst, src, count);
	return -EFAULT;
}
#else
long strncpy_from_user(char *dst, const char __user *src, long count);
#endif
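
/*
 * Illustrative sketch (hypothetical names, assumptions ours):
 * strncpy_from_user() returns the length of the copied string (not
 * counting the trailing NUL) on success, or -EFAULT; when it returns
 * count, the destination may not be NUL-terminated:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */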

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char __user *str, long len);

static inline long strnlen_user(const char __user *str, long len)
{
	if (!access_ok(str, 1))
		return 0;
	return __strnlen_user(str, len);
}
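
/*
 * Illustrative sketch (assumptions ours): strnlen_user() counts the
 * terminating NUL and returns 0 for a bad user pointer:
 *
 *	long n = strnlen_user(ustr, PATH_MAX);
 *
 *	if (n == 0)
 *		return -EFAULT;
 */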

#endif	/* _XTENSA_UACCESS_H */