/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_UACCESS_H
#define _ASM_MICROBLAZE_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h> /* RLIMIT_FSIZE */
#include <linux/mm.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/segment.h>
#include <linux/string.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)

#ifndef CONFIG_MMU

extern int ___range_ok(unsigned long addr, unsigned long size);

#define __range_ok(addr, size) \
		___range_ok((unsigned long)(addr), (unsigned long)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)

/* Deliberately undefined; referencing it triggers a link-time error */
extern int bad_user_access_length(void);

/* FIXME: candidate for optimization -> memcpy */
#define __get_user(var, ptr)				\
({							\
	int __gu_err = 0;				\
	switch (sizeof(*(ptr))) {			\
	case 1:						\
	case 2:						\
	case 4:						\
		(var) = *(ptr);				\
		break;					\
	case 8:						\
		memcpy((void *) &(var), (ptr), 8);	\
		break;					\
	default:					\
		(var) = 0;				\
		__gu_err = __get_user_bad();		\
		break;					\
	}						\
	__gu_err;					\
})

#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))

/* FIXME: __pu_val is only defined in the 8-byte case below */
#define __put_user(var, ptr)					\
({								\
	int __pu_err = 0;					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
		*(ptr) = (var);					\
		break;						\
	case 8: {						\
		typeof(*(ptr)) __pu_val = (var);		\
		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
		}						\
		break;						\
	default:						\
		__pu_err = __put_user_bad();			\
		break;						\
	}							\
	__pu_err;						\
})

#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))
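
/*
 * Illustrative note: sizeof(*(ptr)) is a compile-time constant, so the
 * switches above collapse at compile time and the reference to
 * bad_user_access_length() is discarded for the supported sizes. A
 * hypothetical caller using an unsupported size keeps the call and
 * fails at link time:
 *
 *	struct odd { char b[3]; } v = { };
 *	err = __put_user(v, (struct odd *)uptr);
 *	=> undefined reference to `bad_user_access_length'
 */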

#define put_user(x, ptr)	__put_user((x), (ptr))
#define get_user(x, ptr)	__get_user((x), (ptr))

#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)

#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
			(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
			(__copy_from_user((to), (from), (n)))

static inline unsigned long clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		size = __clear_user(addr, size);
	return size;
}
/* Returns 0 if no exception entry is found, or the fixup address otherwise. */
extern unsigned long search_exception_table(unsigned long);

extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);

#else /* CONFIG_MMU */

/*
 * Address is valid if:
 *  - "addr", "addr + size" and "size" are all below the limit
 */
#define access_ok(type, addr, size) \
	(get_fs().seg > (((unsigned long)(addr)) | \
		(size) | ((unsigned long)(addr) + (size))))
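
/*
 * Worked example (illustrative values; assume a user segment limit of
 * 0x80000000): addr = 0x7fffffe0 and size = 0x10 give
 * 0x7fffffe0 | 0x10 | 0x7ffffff0 = 0x7ffffff0, which is below the
 * limit, so the access is allowed. With addr = 0x7ffffff0 the sum
 * reaches 0x80000000 and the OR is no longer below the limit, so
 * access_ok() fails. ORing in addr and size also catches a wrapped
 * addr + size, because the out-of-range operand still sets a high bit.
 */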

/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
 type?"WRITE":"READ",addr,size,get_fs().seg)) */

/*
 * None of the __XXX macros/functions below perform any access checking.
 * It is assumed that the necessary checks have already been done before
 * the function (macro) is called.
 */

#define get_user(x, ptr)						\
({									\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))			\
		? __get_user((x), (ptr)) : -EFAULT;			\
})

#define put_user(x, ptr)						\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))			\
		? __put_user((x), (ptr)) : -EFAULT;			\
})
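
/*
 * Usage sketch (hypothetical caller; "arg" is an assumed user pointer):
 * get_user()/put_user() return 0 on success and -EFAULT when the
 * pointer fails access_ok(), so a typical ioctl handler would do:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, (int __user *)arg))
 *		return -EFAULT;
 */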

#define __get_user(x, ptr)						\
({									\
	unsigned long __gu_val;						\
	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
	long __gu_err;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("lbu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__get_user_asm("lhu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
		break;							\
	default:							\
		__gu_val = 0; __gu_err = -EINVAL;			\
	}								\
	x = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})

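/*
 * In __get_user_asm() below, the load at label 1 may fault; the
 * __ex_table entry pairs it with the fixup at label 3, which branches
 * back to label 2. brid is a delayed branch, so the addik in its delay
 * slot loads -EFAULT into the error register before the branch
 * completes. On the non-faulting path, the addk after the load simply
 * zeroes the error register.
 */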
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)		\
({									\
	__asm__ __volatile__ (						\
			"1:"	insn	" %1, %2, r0;			\
				addk	%0, r0, r0;			\
			2:						\
			.section .fixup,\"ax\";				\
			3:	brid	2b;				\
				addik	%0, r0, %3;			\
			.previous;					\
			.section __ex_table,\"a\";			\
			.word	1b,3b;					\
			.previous;"					\
		: "=r"(__gu_err), "=r"(__gu_val)			\
		: "r"(__gu_ptr), "i"(-EFAULT)				\
	);								\
})

#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) volatile __gu_val = (x);			\
	long __gu_err = 0;						\
	switch (sizeof(__gu_val)) {					\
	case 1:								\
		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__put_user_asm("sw", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 8:								\
		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
		break;							\
	default:							\
		__gu_err = -EINVAL;					\
	}								\
	__gu_err;							\
})

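/*
 * 64-bit store: the value is written as two 32-bit swi instructions
 * (labels 1 and 2), either of which may fault, so the exception table
 * gets two entries that share the fixup at label 4. %0 doubles as a
 * scratch register for the lwi loads and as the error return, which is
 * why it uses the early-clobber "=&r" constraint.
 */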
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
({							\
__asm__ __volatile__ ("	lwi	%0, %1, 0;		\
		1:	swi	%0, %2, 0;		\
			lwi	%0, %1, 4;		\
		2:	swi	%0, %2, 4;		\
			addk	%0, r0, r0;		\
		3:					\
		.section .fixup,\"ax\";			\
		4:	brid	3b;			\
			addik	%0, r0, %3;		\
		.previous;				\
		.section __ex_table,\"a\";		\
		.word	1b,4b,2b,4b;			\
		.previous;"				\
	: "=&r"(__gu_err)				\
	: "r"(&__gu_val),				\
	"r"(__gu_ptr), "i"(-EFAULT)			\
	);						\
})

#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
			"1:"	insn	" %1, %2, r0;		\
				addk	%0, r0, r0;		\
			2:					\
			.section .fixup,\"ax\";			\
			3:	brid	2b;			\
				addik	%0, r0, %3;		\
			.previous;				\
			.section __ex_table,\"a\";		\
			.word	1b,3b;				\
			.previous;"				\
		: "=r"(__gu_err)				\
		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
	);							\
})

/*
 * Returns the number of bytes not cleared: 0 on success, non-zero on
 * failure.
 */
static inline int clear_user(char *to, int size)
{
	if (size && access_ok(VERIFY_WRITE, to, size)) {
		__asm__ __volatile__ ("				\
				1:				\
					sb	r0, %2, r0;	\
					addik	%0, %0, -1;	\
					bneid	%0, 1b;		\
					addik	%2, %2, 1;	\
				2:				\
				.section __ex_table,\"a\";	\
				.word	1b,2b;			\
				.section .text;"
			: "=r"(size)
			: "0"(size), "r"(to)
		);
	}
	return size;
}
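
/*
 * Usage sketch (hypothetical caller): zero a user buffer, treating any
 * remainder as a fault:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */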

#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
		copy_from_user((to), (from), (n))

#define copy_to_user(to, from, n)					\
	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
		__copy_tofrom_user((void __user *)(to),			\
			(__force const void __user *)(from), (n))	\
		: -EFAULT)

#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))

#define copy_from_user(to, from, n)					\
	(access_ok(VERIFY_READ, (from), (n)) ?				\
		__copy_tofrom_user((__force void __user *)(to),		\
			(void __user *)(from), (n))			\
		: -EFAULT)

extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);

#define strncpy_from_user(to, from, len)	\
		(access_ok(VERIFY_READ, from, 1) ?	\
			__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len)	\
		(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
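
/*
 * Usage sketch (hypothetical caller; assumes the usual return
 * convention where a result equal to the buffer size means the string
 * was not NUL-terminated within it):
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (n < 0)
 *		return -EFAULT;
 *	if (n == sizeof(name))
 *		return -ENAMETOOLONG;
 */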

#endif /* CONFIG_MMU */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means that when everything goes
 * well, we don't even have to jump over them. Further, they do not
 * intrude on our cache or TLB entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};
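
/*
 * For example, the "1:" and "3:" labels in __get_user_asm() above
 * produce one such pair: insn is the address of the user-space load at
 * label 1 and fixup is the address of the recovery code at label 3.
 * When the load faults, the fault handler looks the faulting PC up in
 * __ex_table and resumes execution at the fixup address instead of
 * oopsing.
 */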

#endif  /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ASM_MICROBLAZE_UACCESS_H */