#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erm, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)								\
do {										\
	current_thread_info()->current_ds = (val).seg;				\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while (0)
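
/*
 * Illustrative sketch, not part of this interface: the classic
 * save/override/restore pattern for code that must temporarily direct
 * the user-access routines at kernel memory.  "do_kernel_io", "kbuf"
 * and "len" are hypothetical; only get_fs()/set_fs()/KERNEL_DS come
 * from the definitions above.
 *
 *	mm_segment_t old_fs = get_fs();
 *	long err;
 *
 *	set_fs(KERNEL_DS);		// %asi now selects the kernel map
 *	err = do_kernel_io(kbuf, len);	// may use copy_{to,from}_user()
 *	set_fs(old_fs);			// always restore the saved segment
 */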

static inline int __access_ok(const void __user *addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user *addr, unsigned long size)
{
	return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};
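
/*
 * Illustrative sketch of how a trap handler consumes this table;
 * "find_fixup" is a hypothetical helper, not declared here.  The real
 * kernel keeps the table sorted and binary-searches it, but a linear
 * scan shows the idea: match the faulting PC against insn, then
 * resume execution at the paired fixup address.
 *
 *	static const struct exception_table_entry *
 *	find_fixup(const struct exception_table_entry *tbl, int num,
 *		   unsigned long fault_pc)
 *	{
 *		int i;
 *
 *		for (i = 0; i < num; i++)
 *			if (tbl[i].insn == fault_pc)
 *				return &tbl[i];
 *		return NULL;	// no fixup: the fault is genuine
 *	}
 */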

void __ret_efault(void);
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
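
/*
 * Example (illustrative only; "uaddr" is a hypothetical user pointer):
 * the two values come back as the macro's 0/-EFAULT result plus the
 * variable named in the first argument, no pointer passing needed.
 *
 *	int get_put_example(int __user *uaddr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uaddr))	// val = *uaddr on success
 *			return -EFAULT;
 *		return put_user(val + 1, uaddr);	// 0 or -EFAULT
 *	}
 */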

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	}							\
	__pu_ret;						\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({			     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (size) {							     \
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	     \
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	     \
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break;	     \
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;	     \
	default:							     \
		__gu_val = 0;						     \
		__gu_ret = __get_user_bad();				     \
		break;							     \
	}								     \
	data = (__force type) __gu_val;					     \
	__gu_ret;							     \
})

#define __get_user_nocheck_ret(data, addr, size, type, retval) ({	\
	register unsigned long __gu_val __asm__ ("l1");			\
	switch (size) {							\
	case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break;	\
	case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break;	\
	case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break;	\
	case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break;	\
	default:							\
		if (__get_user_bad())					\
			return retval;					\
	}								\
	data = (__force type) __gu_val;					\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

#define __get_user_asm_ret(x, size, addr, retval)			\
if (__builtin_constant_p(retval) && retval == -EFAULT)			\
	__asm__ __volatile__(						\
		"/* Get user asm ret, inline. */\n"			\
	"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t"			\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b,__ret_efault\n\n\t"				\
		".previous\n\t"						\
	       : "=r" (x) : "r" (__m(addr)));				\
else									\
	__asm__ __volatile__(						\
		"/* Get user asm ret, inline. */\n"			\
	"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t"			\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"ret\n\t"						\
		" restore %%g0, %2, %%o0\n\n\t"				\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (x) : "r" (__m(addr)), "i" (retval))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
unsigned long copy_from_user_fixup(void *to, const void __user *from,
				   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_from_user(to, from, size);

	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}
#define __copy_from_user copy_from_user
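
/*
 * Example (illustrative; "kbuf", "ubuf" and "len" are hypothetical):
 * the fast-path ___copy_from_user() may report an imprecise residual
 * count when it faults, so the unlikely fixup pass re-walks the copy
 * to compute an exact byte count.  Callers see only the usual
 * contract: 0 on success, otherwise the number of bytes not copied.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;		// a partial copy is a failure
 */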

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
unsigned long copy_to_user_fixup(void __user *to, const void *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
unsigned long copy_in_user_fixup(void __user *to, void __user *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
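
/*
 * Example (illustrative; "ubuf", "ustr", "len" and "umax" are
 * hypothetical): zeroing and string probing follow the conventions
 * above, clear_user() returns the number of bytes left unzeroed, and
 * strnlen_user() returns the length including the terminating NUL,
 * or 0 if the access faulted.
 *
 *	long n;
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *	n = strnlen_user(ustr, umax);	// 0 means the access faulted
 *	if (!n)
 *		return -EFAULT;
 */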

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */