xref: /openbmc/linux/arch/sparc/include/asm/uaccess_32.h (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/vac-ops.h>
#endif

#ifndef __ASSEMBLY__

/* Sparc is not segmented; however, we need to be able to fool access_ok()
 * when doing system calls from kernel mode legitimately.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { 0 })
#define USER_DS     ((mm_segment_t) { -1 })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	((current->thread.current_ds) = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)
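
/* Usage sketch (illustration only, not part of this header): the trick the
 * comment above alludes to.  A kernel path that must pass a kernel pointer
 * through code doing access_ok() checks temporarily widens the address limit
 * and restores it afterwards.  do_read() and kbuf are hypothetical names.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = do_read(file, kbuf, len);		(kbuf is a kernel pointer)
 *	set_fs(old_fs);
 */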

/* We have here a nice unmapped page at PAGE_OFFSET - PAGE_SIZE, so that this
 * test can be fairly lightweight.
 * Nobody can read or write anything from userland in kernel space by passing
 * a large size and an address near PAGE_OFFSET; a fault will break the attempt.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
#define access_ok(type, addr, size)					\
	({ (void)(type); __access_ok((unsigned long)(addr), size); })

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way to mark a range of potentially faulting insns
 * (like twenty ldd/std's with no intervening other instructions): you
 * specify the address of the first insn in the insn field and 0 in the
 * fixup field, and in the next exception_table_entry you specify the last
 * potentially faulting insn + 1 and, in fixup, the routine which should
 * handle the fault.
 * That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (i.e. the index of the faulting instruction in the range).
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};
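
/* Illustration only: what a "range" pair of entries could look like when
 * emitted from assembly.  The labels (1:, 2:) and the fixup routine name
 * (range_fixup) are hypothetical; only the encoding convention described in
 * the comment above is being shown.
 *
 *	1:	ldd	[%o1 + 0x00], %l0	! first potentially faulting insn
 *		ldd	[%o1 + 0x08], %l2
 *		ldd	[%o1 + 0x10], %l4
 *	2:
 *		.section __ex_table,#alloc
 *		.align	4
 *		.word	1b, 0			! start of range, fixup == 0
 *		.word	2b, range_fixup		! last insn + 1, real fixup
 *		.previous
 *
 * When a fault hits inside the range, range_fixup runs with
 * (fault_address - 1b) / 4 in %g2.
 */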

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);

extern void __ret_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
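
/* Usage sketch (illustration only): reading and writing a single value at a
 * user-supplied pointer.  The function and variable names are hypothetical;
 * the point is that the macros pick the access size from the pointer type
 * and return 0 or -EFAULT.
 *
 *	int example_peek_poke(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return put_user(val + 1, uptr);
 *	}
 *
 * The __get_user()/__put_user() variants skip the access_ok() check and are
 * meant for callers that have already validated the range themselves.
 */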

/* Dummy oversized struct: an "m" (*__m(addr)) operand tells gcc that the
 * inline asm below reads or writes memory at addr without fixing an exact size.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x,addr,size) ({ \
register int __pu_ret; \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
case 4: __put_user_asm(x,,addr,__pu_ret); break; \
case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} } else { __pu_ret = -EFAULT; } __pu_ret; })

#define __put_user_nocheck(x,addr,size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
case 4: __put_user_asm(x,,addr,__pu_ret); break; \
case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} __pu_ret; })

#define __put_user_asm(x,size,addr,ret)					\
__asm__ __volatile__(							\
	"/* Put user asm, inline. */\n"					\
"1:\t"	"st"#size " %1, %2\n\t"						\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"b	2b\n\t"							\
	" mov	%3, %0\n\t"						\
	".previous\n\n\t"						\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b, 3b\n\t"						\
	".previous\n\n\t"						\
       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),			\
	 "i" (-EFAULT))

extern int __put_user_bad(void);

#define __get_user_check(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })

/* The *_ret flavours do not report an error code to their caller; on a
 * faulting access they return "retval" directly from the enclosing function.
 */
#define __get_user_check_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; } else return retval; })

#define __get_user_nocheck(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} x = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret)					\
__asm__ __volatile__(							\
	"/* Get user asm, inline. */\n"					\
"1:\t"	"ld"#size " %2, %1\n\t"						\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"clr	%1\n\t"							\
	"b	2b\n\t"							\
	" mov	%3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),			\
	 "i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval)				\
if (__builtin_constant_p(retval) && retval == -EFAULT)			\
__asm__ __volatile__(							\
	"/* Get user asm ret, inline. */\n"				\
"1:\t"	"ld"#size " %1, %0\n\n\t"					\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b,__ret_efault\n\n\t"					\
	".previous\n\t"							\
       : "=&r" (x) : "m" (*__m(addr)));					\
else									\
__asm__ __volatile__(							\
	"/* Get user asm ret, inline. */\n"				\
"1:\t"	"ld"#size " %1, %0\n\n\t"					\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"ret\n\t"							\
	" restore %%g0, %2, %%o0\n\n\t"					\
	".previous\n\t"							\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
       : "=&r" (x) : "m" (*__m(addr)), "i" (retval))

extern int __get_user_bad(void);

extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) to, n))
		return __copy_user(to, (__force void __user *) from, n);
	else
		return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) from, n))
		return __copy_user((__force void __user *) to, from, n);
	else
		return n;
}

static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user((__force void __user *) to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
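
/* Usage sketch (illustration only): copying a block in from and back out to
 * user space.  The struct and buffer names are hypothetical.  Note the return
 * convention: the copy helpers return the number of bytes NOT copied, so 0
 * means success.
 *
 *	struct example_req req;
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(ubuf, &req, sizeof(req)))
 *		return -EFAULT;
 */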

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,3\n\t"
		".previous\n\t"
		"mov %2, %%o1\n"
		"1:\n\t"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"
		: "=r" (ret) : "r" (addr), "r" (size) :
		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
		"g1", "g2", "g3", "g4", "g5", "g7", "cc");

	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (n && __access_ok((unsigned long) addr, n))
		return __clear_user(addr, n);
	else
		return n;
}

extern long __strncpy_from_user(char *dest, const char __user *src, long count);

static inline long strncpy_from_user(char *dest, const char __user *src, long count)
{
	if (__access_ok((unsigned long) src, count))
		return __strncpy_from_user(dest, src, count);
	else
		return -EFAULT;
}
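
/* Usage sketch (illustration only): pulling a NUL-terminated string out of
 * user space into a fixed kernel buffer.  Names are hypothetical.  On success
 * the return value is the length of the copied string; on an inaccessible
 * user range it is -EFAULT.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 */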

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

static inline long strlen_user(const char __user *str)
{
	if (!access_ok(VERIFY_READ, str, 0))
		return 0;
	else
		return __strlen_user(str);
}

static inline long strnlen_user(const char __user *str, long len)
{
	if (!access_ok(VERIFY_READ, str, 0))
		return 0;
	else
		return __strnlen_user(str, len);
}

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */