/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic functions for accessing user memory
 * space from the kernel.  This header file provides functions such
 * as get_user(), put_user(), copy_to_user(), copy_from_user(), and
 * clear_user().
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/errno.h>

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#ifdef __ASSEMBLY__

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/types.h>

/*
 * These assembly macros mirror the C macros that follow below.  They
 * should always have identical functionality.  See
 * arch/xtensa/kernel/sys.S for usage.
 */

#define KERNEL_DS	0
#define USER_DS		1

#define get_ds		(KERNEL_DS)

/*
 * get_fs reads current->thread.current_ds into a register.
 * On Entry:
 *	<ad>	anything
 *	<sp>	stack
 * On Exit:
 *	<ad>	contains current->thread.current_ds
 */
	.macro	get_fs	ad, sp
	GET_CURRENT(\ad,\sp)
	l32i	\ad, \ad, THREAD_CURRENT_DS
	.endm

/*
 * set_fs sets current->thread.current_ds to some value.
 * On Entry:
 *	<at>	anything (temp register)
 *	<av>	value to write
 *	<sp>	stack
 * On Exit:
 *	<at>	destroyed (actually, current)
 *	<av>	preserved, value to write
 */
	.macro	set_fs	at, av, sp
	GET_CURRENT(\at,\sp)
	s32i	\av, \at, THREAD_CURRENT_DS
	.endm

/*
 * kernel_ok determines whether we should bypass addr/size checking.
 * See the equivalent C-macro version below for clarity.
 * On success, kernel_ok branches to a label indicated by parameter
 * <success>.  This implies that the macro falls through to the next
 * instruction on an error.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on error).
 *
 * On Entry:
 *	<at>		anything (temp register)
 *	<success>	label to branch to on success; implies
 *			fall-through macro on error
 *	<sp>		stack pointer
 * On Exit:
 *	<at>		destroyed (actually, current->thread.current_ds)
 */

#if ((KERNEL_DS != 0) || (USER_DS == 0))
# error Assembly macro kernel_ok fails
#endif
	.macro	kernel_ok  at, sp, success
	get_fs	\at, \sp
	beqz	\at, \success
	.endm

/*
 * user_ok determines whether the access to user-space memory is allowed.
 * See the equivalent C-macro version below for clarity.
 *
 * On error, user_ok branches to a label indicated by parameter
 * <error>.  This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on success).
 *
 * On Entry:
 *	<aa>	register containing memory address
 *	<as>	register containing memory size
 *	<at>	temp register
 *	<error>	label to branch to on error; implies fall-through
 *		macro on success
 * On Exit:
 *	<aa>	preserved
 *	<as>	preserved
 *	<at>	destroyed (actually, (TASK_SIZE - size))
 */
	.macro	user_ok	aa, as, at, error
	movi	\at, __XTENSA_UL_CONST(TASK_SIZE)
	bgeu	\as, \at, \error
	sub	\at, \at, \as
	bgeu	\aa, \at, \error
	.endm

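/*
 * A worked instance of the user_ok range check (illustrative only;
 * the actual TASK_SIZE value depends on the kernel configuration).
 * Suppose TASK_SIZE = 0x40000000:
 *
 *	addr = 0x10000000, size = 0x1000:
 *		size < TASK_SIZE and addr < TASK_SIZE - size
 *		(0x3ffff000), so both branches fall through and the
 *		access is allowed.
 *
 *	addr = 0x3ffffff8, size = 0x10:
 *		TASK_SIZE - size = 0x3ffffff0 <= addr, so the second
 *		bgeu branches to <error>; the access would run past
 *		TASK_SIZE.
 */
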
/*
 * access_ok determines whether a memory access is allowed.  See the
 * equivalent C-macro version below for clarity.
 *
 * On error, access_ok branches to a label indicated by parameter
 * <error>.  This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that we assume success is the common case, and we optimize the
 * branch fall-through case on success.
 *
 * On Entry:
 *	<aa>	register containing memory address
 *	<as>	register containing memory size
 *	<at>	temp register
 *	<sp>	stack pointer
 *	<error>	label to branch to on error; implies fall-through
 *		macro on success
 * On Exit:
 *	<aa>	preserved
 *	<as>	preserved
 *	<at>	destroyed
 */
	.macro	access_ok  aa, as, at, sp, error
	kernel_ok  \at, \sp, .Laccess_ok_\@
	user_ok    \aa, \as, \at, \error
.Laccess_ok_\@:
	.endm

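/*
 * Illustrative use of access_ok from assembly (a sketch of the
 * pattern used in arch/xtensa/kernel/sys.S; the register choices
 * and label names here are assumptions, not copied from that file):
 *
 *	# a3 = user address, a4 = size, a5 = scratch, a1 = stack ptr
 *	access_ok a3, a4, a5, a1, .Lefault
 *	...				fall through: access permitted
 * .Lefault:
 *	movi	a2, -EFAULT		error code for the caller
 */
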
#else /* __ASSEMBLY__ not defined */

#include <linux/sched.h>
#include <asm/types.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not.  If get_fs() == USER_DS, checking is
 * performed; if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	(current->thread.current_ds = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))

/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Be careful not to:
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))

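/*
 * Example usage (an illustrative sketch, not part of this header;
 * "arg" is an assumed user-space pointer): an ioctl handler might
 * transfer a single int to and from user space like this:
 *
 *	int val;
 *	if (get_user(val, (int *)arg))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, (int *)arg))
 *		return -EFAULT;
 */
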
/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (these are used when we make multiple accesses
 * to the same area of user memory).
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))

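/*
 * Example of the pattern described above (an illustrative sketch;
 * "arg", "struct pair", and the field names are assumptions): verify
 * the whole user area once, then use the unchecked accessors for the
 * individual fields:
 *
 *	struct pair { int a; int b; };
 *	struct pair *up = (struct pair *)arg;
 *	int a, b;
 *
 *	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
 *		return -EFAULT;
 *	if (__get_user(a, &up->a) || __get_user(b, &up->b))
 *		return -EFAULT;
 */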

extern long __put_user_bad(void);

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (access_ok(VERIFY_WRITE,__pu_addr,size))		\
		__put_user_size((x),__pu_addr,(size),__pu_err);	\
	__pu_err;						\
})

#define __put_user_size(x,ptr,size,retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb);  break;	\
	case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break;	\
	case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break;	\
	case 8: {							\
			__typeof__(*(ptr)) __v64 = x;			\
			retval = __copy_to_user(ptr,&__v64,8);		\
			break;						\
		}							\
	default: __put_user_bad();					\
	}								\
} while (0)


/*
 * Consider the case where a single user load/store causes both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable pointer to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
 * sync.
 */

#define __check_align_1  ""

#define __check_align_2				\
	"   _bbci.l %3,  0, 1f		\n"	\
	"   movi    %0, %4		\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %3,  0, 0f		\n"	\
	"   _bbci.l %3,  1, 1f		\n"	\
	"0: movi    %0, %4		\n"	\
	"   _j      2f			\n"


/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x, addr, err, align, insn, cb)	\
   __asm__ __volatile__(				\
	__check_align_##align				\
	"1: "insn"  %2, %3, 0		\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"4:				\n"		\
	"   .long  2b			\n"		\
	"5:				\n"		\
	"   l32r   %1, 4b		\n"		\
	"   movi   %0, %4		\n"		\
	"   jx     %1			\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long	1b, 5b		\n"		\
	"   .previous"					\
	:"=r" (err), "=r" (cb)				\
	:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ,__gu_addr,size))			\
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb);  break;	\
	case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break;	\
	case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb);  break;	\
	case 8: retval = __copy_from_user(&x,ptr,8);	break;		\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)


/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x, addr, err, align, insn, cb) \
   __asm__ __volatile__(			\
	__check_align_##align			\
	"1: "insn"  %2, %3, 0		\n"	\
	"2:				\n"	\
	"   .section  .fixup,\"ax\"	\n"	\
	"   .align 4			\n"	\
	"4:				\n"	\
	"   .long  2b			\n"	\
	"5:				\n"	\
	"   l32r   %1, 4b		\n"	\
	"   movi   %2, 0		\n"	\
	"   movi   %0, %4		\n"	\
	"   jx     %1			\n"	\
	"   .previous			\n"	\
	"   .section  __ex_table,\"a\"	\n"	\
	"   .long	1b, 5b		\n"	\
	"   .previous"				\
	:"=r" (err), "=r" (cb), "=r" (x)	\
	:"r" (addr), "i" (-EFAULT), "0" (err))


/*
 * Copy to/from user space
 */

/*
 * We use a generic, arbitrary-sized copy subroutine.  The Xtensa
 * architecture would cause heavy code bloat if we tried to inline
 * these functions and provide __constant_copy_* equivalents like the
 * i386 versions.  __xtensa_copy_user is quite efficient.  See the
 * .fixup section of __xtensa_copy_user for a discussion on the
 * X_zeroing equivalents for Xtensa.
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)


static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}

static inline unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
	prefetch(from);
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_user(to,from,n);
	return n;
}

static inline unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
	prefetchw(to);
	if (access_ok(VERIFY_READ, from, n))
		return __copy_user(to,from,n);
	else
		memset(to, 0, n);
	return n;
}

#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

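/*
 * Example usage (an illustrative sketch; "ubuf" and fill_stats() are
 * assumptions): copy a structure out to a user buffer.  Both
 * copy_to_user() and copy_from_user() return the number of bytes
 * that could not be copied, so nonzero means failure:
 *
 *	struct stats s;
 *	fill_stats(&s);
 *	if (copy_to_user(ubuf, &s, sizeof(s)))
 *		return -EFAULT;
 */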

/*
 * We need to return the number of bytes not cleared.  Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, we return size (no memory was cleared); otherwise we
 * return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
	if (!memset(addr, 0, size))
		return size;
	return 0;
}

static inline unsigned long
clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}

#define __clear_user  __xtensa_clear_user

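/*
 * Example (an illustrative sketch; "ubuf", "copied", and "len" are
 * assumptions): zero the tail of a user buffer after a short read.
 * clear_user() returns the number of bytes not cleared:
 *
 *	if (copied < len && clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */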

extern long __strncpy_user(char *, const char *, long);
#define __strncpy_from_user __strncpy_user

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}

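/*
 * Example (an illustrative sketch; "uname" is an assumed user-space
 * pointer): fetch a NUL-terminated name from user space.  On success
 * the return value is the length of the copied string, so the buffer
 * can be terminated explicitly:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */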

#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *, long);

static inline long strnlen_user(const char *str, long len)
{
	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len);
}

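/*
 * Example (an illustrative sketch; "ustr" is an assumed user-space
 * pointer): per the comment above, the returned length includes the
 * terminating 0, and a return of 0 signals an inaccessible string:
 *
 *	long n = strnlen_user(ustr, 256);
 *	if (n == 0)
 *		return -EFAULT;
 *	(a valid ustr pointing at "abc" yields n == 4)
 */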

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup.unit otherwise.  */

extern unsigned long search_exception_table(unsigned long addr);
extern void sort_exception_table(void);

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc)                \
({                                                              \
	fixup_unit;                                             \
})

#endif	/* __ASSEMBLY__ */
#endif	/* _XTENSA_UACCESS_H */