xref: /openbmc/linux/arch/parisc/include/asm/uaccess.h (revision c61c25eb02757ecf697015ef4ae3675c5e114e2e)
1deae26bfSKyle McMartin #ifndef __PARISC_UACCESS_H
2deae26bfSKyle McMartin #define __PARISC_UACCESS_H
3deae26bfSKyle McMartin 
4deae26bfSKyle McMartin /*
5deae26bfSKyle McMartin  * User space memory access functions
6deae26bfSKyle McMartin  */
7deae26bfSKyle McMartin #include <asm/page.h>
8deae26bfSKyle McMartin #include <asm/system.h>
9deae26bfSKyle McMartin #include <asm/cache.h>
10deae26bfSKyle McMartin #include <asm-generic/uaccess.h>
11deae26bfSKyle McMartin 
/* Argument values for the historic access_ok()/verify_area() 'type'. */
#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * Address-limit cookies: 0 selects the kernel address space, 1 the
 * user address space (parisc keeps them separate, see below).
 */
#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS 	((mm_segment_t){1})

/* True when two segment cookies select the same address space. */
#define segment_eq(a,b)	((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
/* The current task's address limit lives in its thread_info. */
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

/*
 * Referenced from the default: cases of the size switches below;
 * presumably never defined, so an unsupported access size that is not
 * optimized away fails at link time — TODO confirm no definition exists.
 */
extern int __get_kernel_bad(void);
extern int __get_user_bad(void);
extern int __put_kernel_bad(void);
extern int __put_user_bad(void);
35deae26bfSKyle McMartin 
36deae26bfSKyle McMartin static inline long access_ok(int type, const void __user * addr,
37deae26bfSKyle McMartin 		unsigned long size)
38deae26bfSKyle McMartin {
39deae26bfSKyle McMartin 	return 1;
40deae26bfSKyle McMartin }
41deae26bfSKyle McMartin 
42deae26bfSKyle McMartin #define put_user __put_user
43deae26bfSKyle McMartin #define get_user __get_user
44deae26bfSKyle McMartin 
/*
 * 64-bit (ldd/std) accessor selection.  A 32-bit kernel has no
 * single-instruction 64-bit load, so 8-byte get_user resolves to the
 * undefined __get_*_bad() externs, while 8-byte put_user is split into
 * two stw's by the __put_*_asm64() macros further down.  ASM_WORD_INSN
 * emits exception-table entries sized to match sizeof(long).
 */
#if !defined(CONFIG_64BIT)
#define LDD_KERNEL(ptr)		__get_kernel_bad();
#define LDD_USER(ptr)		__get_user_bad();
#define STD_KERNEL(x, ptr)	__put_kernel_asm64(x,ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x,ptr)
#define ASM_WORD_INSN		".word\t"
#else
#define LDD_KERNEL(ptr)		__get_kernel_asm("ldd",ptr)
#define LDD_USER(ptr)		__get_user_asm("ldd",ptr)
#define STD_KERNEL(x, ptr)	__put_kernel_asm("std",x,ptr)
#define STD_USER(x, ptr)	__put_user_asm("std",x,ptr)
#define ASM_WORD_INSN		".dword\t"
#endif
58deae26bfSKyle McMartin 
59deae26bfSKyle McMartin /*
60deae26bfSKyle McMartin  * The exception table contains two values: the first is an address
61deae26bfSKyle McMartin  * for an instruction that is allowed to fault, and the second is
62deae26bfSKyle McMartin  * the address to the fixup routine.
63deae26bfSKyle McMartin  */
64deae26bfSKyle McMartin 
struct exception_table_entry {
	unsigned long insn;  /* address of insn that is allowed to fault.   */
	long fixup;          /* address of the fixup routine to branch to   */
};
69deae26bfSKyle McMartin 
/*
 * Emit one exception-table entry from inline assembly: the address of
 * the instruction allowed to fault followed by the address of its
 * fixup routine.  ASM_WORD_INSN keeps the entry width equal to
 * sizeof(long) on both 32- and 64-bit kernels.
 */
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
	".previous\n"
74deae26bfSKyle McMartin 
75deae26bfSKyle McMartin /*
76deae26bfSKyle McMartin  * The page fault handler stores, in a per-cpu area, the following information
77deae26bfSKyle McMartin  * if a fixup routine is available.
78deae26bfSKyle McMartin  */
struct exception_data {
	unsigned long fault_ip;		/* IP of the faulting instruction */
	unsigned long fault_space;	/* space id of the faulting access
					   -- presumably; verify against
					   the fault handler */
	unsigned long fault_addr;	/* address that faulted */
};
84deae26bfSKyle McMartin 
/*
 * __get_user(x,ptr): fetch a 1/2/4/8 byte value from *ptr into x.
 * The load instruction is chosen by sizeof(*ptr); any other size
 * references __get_{kernel,user}_bad() instead.  With KERNEL_DS the
 * plain kernel address space is used; with USER_DS the access goes
 * through space register %sr3 (see __get_user_asm).  The error status
 * is pinned in r8 and the loaded value in r9 so the fault fixups
 * (fixup_get_user_skip_1) can find them.  Evaluates to 0 on success,
 * or to whatever error the fixup left in r8.
 */
#define __get_user(x,ptr)                               \
({                                                      \
	register long __gu_err __asm__ ("r8") = 0;      \
	register long __gu_val __asm__ ("r9") = 0;      \
							\
	if (segment_eq(get_fs(),KERNEL_DS)) {           \
	    switch (sizeof(*(ptr))) {                   \
	    case 1: __get_kernel_asm("ldb",ptr); break; \
	    case 2: __get_kernel_asm("ldh",ptr); break; \
	    case 4: __get_kernel_asm("ldw",ptr); break; \
	    case 8: LDD_KERNEL(ptr); break;		\
	    default: __get_kernel_bad(); break;         \
	    }                                           \
	}                                               \
	else {                                          \
	    switch (sizeof(*(ptr))) {                   \
	    case 1: __get_user_asm("ldb",ptr); break;   \
	    case 2: __get_user_asm("ldh",ptr); break;   \
	    case 4: __get_user_asm("ldw",ptr); break;   \
	    case 8: LDD_USER(ptr);  break;		\
	    default: __get_user_bad(); break;           \
	    }                                           \
	}                                               \
							\
	(x) = (__typeof__(*(ptr))) __gu_val;            \
	__gu_err;                                       \
})
112deae26bfSKyle McMartin 
/*
 * Single-instruction kernel-space load with a fault fixup: loads
 * 0(ptr) into __gu_val, leaving any error in __gu_err (both pinned to
 * fixed registers r9/r8 by __get_user() above).  r1 is clobbered by
 * the fixup routine.
 *
 * Note: no trailing semicolon -- the expansion site supplies it, the
 * same convention as the __put_*_asm() macros below.  (The previous
 * version ended in ';', leaving a stray empty statement at every use.)
 */
#define __get_kernel_asm(ldx,ptr)                       \
	__asm__("\n1:\t" ldx "\t0(%2),%0\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)		\
		: "r1")

/* As __get_kernel_asm(), but the load goes through user space (%sr3). */
#define __get_user_asm(ldx,ptr)                         \
	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t"	\
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)		\
		: "r1")
126deae26bfSKyle McMartin 
/*
 * __put_user(x,ptr): store x as a 1/2/4/8 byte value at *ptr, the
 * store instruction chosen by sizeof(*ptr); any other size references
 * __put_{kernel,user}_bad() instead.  x is first converted to the
 * pointee type.  KERNEL_DS stores use the kernel address space,
 * USER_DS stores go through %sr3 (see __put_user_asm).  The error
 * status is pinned in r8 for the fault fixups; evaluates to 0 on
 * success or to the error the fixup left there.
 */
#define __put_user(x,ptr)                                       \
({								\
	register long __pu_err __asm__ ("r8") = 0;      	\
        __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	if (segment_eq(get_fs(),KERNEL_DS)) {                   \
	    switch (sizeof(*(ptr))) {                           \
	    case 1: __put_kernel_asm("stb",__x,ptr); break;     \
	    case 2: __put_kernel_asm("sth",__x,ptr); break;     \
	    case 4: __put_kernel_asm("stw",__x,ptr); break;     \
	    case 8: STD_KERNEL(__x,ptr); break;			\
	    default: __put_kernel_bad(); break;			\
	    }                                                   \
	}                                                       \
	else {                                                  \
	    switch (sizeof(*(ptr))) {                           \
	    case 1: __put_user_asm("stb",__x,ptr); break;       \
	    case 2: __put_user_asm("sth",__x,ptr); break;       \
	    case 4: __put_user_asm("stw",__x,ptr); break;       \
	    case 8: STD_USER(__x,ptr); break;			\
	    default: __put_user_bad(); break;			\
	    }                                                   \
	}                                                       \
								\
	__pu_err;						\
})
153deae26bfSKyle McMartin 
154deae26bfSKyle McMartin /*
155deae26bfSKyle McMartin  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
156deae26bfSKyle McMartin  * instead of writing. This is because they do not write to any memory
157deae26bfSKyle McMartin  * gcc knows about, so there are no aliasing issues. These macros must
158deae26bfSKyle McMartin  * also be aware that "fixup_put_user_skip_[12]" are executed in the
159deae26bfSKyle McMartin  * context of the fault, and any registers used there must be listed
160deae26bfSKyle McMartin  * as clobbers. In this case only "r1" is used by the current routines.
161deae26bfSKyle McMartin  * r8/r9 are already listed as err/val.
162deae26bfSKyle McMartin  */
163deae26bfSKyle McMartin 
/*
 * Single-instruction kernel-space store of x to 0(ptr); the error
 * status lands in __pu_err (r8).  r1 is clobbered by the fixup, see
 * the comment block above.
 */
#define __put_kernel_asm(stx,x,ptr)                         \
	__asm__ __volatile__ (                              \
		"\n1:\t" stx "\t%2,0(%1)\n\t"		    \
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
	    	: "r1")

/* As __put_kernel_asm(), but the store goes through user space (%sr3). */
#define __put_user_asm(stx,x,ptr)                           \
	__asm__ __volatile__ (                              \
		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t"	    \
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
		: "r1")
179deae26bfSKyle McMartin 
180deae26bfSKyle McMartin 
#if !defined(CONFIG_64BIT)

/*
 * 64-bit store on a 32-bit kernel, performed as two 32-bit stw's
 * (high word first).  Each store gets its own exception-table entry;
 * judging by the fixup names, a fault on the first store skips both
 * instructions (skip_2) while a fault on the second skips only
 * itself (skip_1).
 */
#define __put_kernel_asm64(__val,ptr) do {		    \
	u64 __val64 = (u64)(__val);			    \
	u32 hi = (__val64) >> 32;			    \
	u32 lo = (__val64) & 0xffffffff;		    \
	__asm__ __volatile__ (				    \
		"\n1:\tstw %2,0(%1)"			    \
		"\n2:\tstw %3,4(%1)\n\t"		    \
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
		: "r1");				    \
} while (0)

/* As __put_kernel_asm64(), but both stores go through user space (%sr3). */
#define __put_user_asm64(__val,ptr) do {	    	    \
	u64 __val64 = (u64)(__val);			    \
	u32 hi = (__val64) >> 32;			    \
	u32 lo = (__val64) & 0xffffffff;		    \
	__asm__ __volatile__ (				    \
		"\n1:\tstw %2,0(%%sr3,%1)"		    \
		"\n2:\tstw %3,4(%%sr3,%1)\n\t"		    \
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
		: "r1");				    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */
212deae26bfSKyle McMartin 
213deae26bfSKyle McMartin 
214deae26bfSKyle McMartin /*
215deae26bfSKyle McMartin  * Complex access routines -- external declarations
216deae26bfSKyle McMartin  */
217deae26bfSKyle McMartin 
/*
 * Low-level 'l'-prefixed implementations (defined elsewhere, likely in
 * assembly -- not visible from this header); the generic names below
 * are aliased onto them.
 */
extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long lstrncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *,unsigned long);
extern long lstrnlen_user(const char __user *,long);

/*
 * Complex access routines -- macros
 */

#define strncpy_from_user lstrncpy_from_user
#define strnlen_user lstrnlen_user
/* strlen_user() caps the scan at 0x7fffffff bytes. */
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

/*
 * Since access_ok() always succeeds here, the checked and unchecked
 * (double-underscore) copy variants are identical.
 */
unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
#define __copy_to_user copy_to_user
unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len);
#define __copy_from_user copy_from_user
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/* Apply an exception-table fixup, if any, for the fault described by regs. */
int fixup_exception(struct pt_regs *regs);
245*c61c25ebSKyle McMartin 
246deae26bfSKyle McMartin #endif /* __PARISC_UACCESS_H */
247