/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_UACCESS_H
#define __ASM_CSKY_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/version.h>
#include <asm/segment.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

static inline int access_ok(int type, const void __user *addr,
			    unsigned long size)
{
	unsigned long limit = current_thread_info()->addr_limit.seg;

	return (((unsigned long)addr < limit) &&
		((unsigned long)addr + size < limit));
}

static inline int verify_area(int type, const void __user *addr,
			      unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}
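
/*
 * Usage sketch (example_check_user_range() is a hypothetical helper,
 * not part of this header): validate a whole user range once up front;
 * the unchecked __xxx accessors below rely on exactly this check
 * having been done by the caller.
 */
static inline int example_check_user_range(const void __user *ubuf,
					   unsigned long len)
{
	if (!access_ok(VERIFY_READ, ubuf, len))
		return -EFAULT;	/* range is outside the current segment */
	return 0;
}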

#define __addr_ok(addr) (access_ok(VERIFY_READ, addr, 0))

extern int __put_user_bad(void);

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to the
 * same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * C-SKY, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */

#define put_user(x, ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

#define __put_user(x, ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

#define __ptr(x) ((unsigned long *)(x))

#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
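
/*
 * Usage sketch (example_bump_user_counter() is hypothetical, and kept
 * compiled out since the __get_user_check()/__put_user_check() bodies
 * are only defined further down): get_user()/put_user() verify the
 * address themselves and return 0 or -EFAULT; the __ prefixed forms
 * skip the access_ok() check and are only safe after the caller has
 * performed it.
 */
#if 0
static inline int example_bump_user_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* checked read: 0 or -EFAULT */
		return -EFAULT;
	val++;
	return put_user(val, uptr);	/* checked write-back */
}
#endif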

#define __put_user_nocheck(x, ptr, size)				\
({									\
	long __pu_err = 0;						\
	typeof(*(ptr)) __user *__pu_addr = (ptr);			\
	typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x);			\
	if (__pu_addr)							\
		__put_user_size(__pu_val, (__pu_addr), (size),		\
				__pu_err);				\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	typeof(*(ptr)) __user *__pu_addr = (ptr);			\
	typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x);			\
	if (access_ok(VERIFY_WRITE, __pu_addr, size) && __pu_addr)	\
		__put_user_size(__pu_val, __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_size(x, ptr, size, retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		__put_user_asm_b(x, ptr, retval);	\
		break;					\
	case 2:						\
		__put_user_asm_h(x, ptr, retval);	\
		break;					\
	case 4:						\
		__put_user_asm_w(x, ptr, retval);	\
		break;					\
	case 8:						\
		__put_user_asm_64(x, ptr, retval);	\
		break;					\
	default:					\
		__put_user_bad();			\
	}						\
} while (0)

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * Note that PC at a fault is the address *after* the faulting
 * instruction.
 */
#define __put_user_asm_b(x, ptr, err)			\
do {							\
	int errcode;					\
	asm volatile(					\
	"1:     stb   %1, (%2,0)	\n"		\
	"       br    3f		\n"		\
	"2:     mov   %0, %3		\n"		\
	"       br    3f		\n"		\
	".section __ex_table, \"a\"	\n"		\
	".align   2			\n"		\
	".long    1b, 2b		\n"		\
	".previous			\n"		\
	"3:				\n"		\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)	\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)	\
	: "memory");					\
} while (0)

#define __put_user_asm_h(x, ptr, err)			\
do {							\
	int errcode;					\
	asm volatile(					\
	"1:     sth   %1, (%2,0)	\n"		\
	"       br    3f		\n"		\
	"2:     mov   %0, %3		\n"		\
	"       br    3f		\n"		\
	".section __ex_table, \"a\"	\n"		\
	".align   2			\n"		\
	".long    1b, 2b		\n"		\
	".previous			\n"		\
	"3:				\n"		\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)	\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)	\
	: "memory");					\
} while (0)

#define __put_user_asm_w(x, ptr, err)			\
do {							\
	int errcode;					\
	asm volatile(					\
	"1:     stw   %1, (%2,0)	\n"		\
	"       br    3f		\n"		\
	"2:     mov   %0, %3		\n"		\
	"       br    3f		\n"		\
	".section __ex_table, \"a\"	\n"		\
	".align   2			\n"		\
	".long    1b, 2b		\n"		\
	".previous			\n"		\
	"3:				\n"		\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)	\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)	\
	: "memory");					\
} while (0)

#define __put_user_asm_64(x, ptr, err)				\
do {								\
	int tmp;						\
	int errcode;						\
	typeof(*(ptr)) src = (typeof(*(ptr)))x;			\
	typeof(*(ptr)) *psrc = &src;				\
								\
	asm volatile(						\
	"     ldw     %3, (%1, 0)     \n"			\
	"1:   stw     %3, (%2, 0)     \n"			\
	"     ldw     %3, (%1, 4)     \n"			\
	"2:   stw     %3, (%2, 4)     \n"			\
	"     br      4f              \n"			\
	"3:   mov     %0, %4          \n"			\
	"     br      4f              \n"			\
	".section __ex_table, \"a\"   \n"			\
	".align   2                   \n"			\
	".long    1b, 3b              \n"			\
	".long    2b, 3b              \n"			\
	".previous                    \n"			\
	"4:                           \n"			\
	: "=r"(err), "=r"(psrc), "=r"(ptr),			\
	  "=r"(tmp), "=r"(errcode)				\
	: "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT)	\
	: "memory");						\
} while (0)
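
/*
 * Sketch (example_put_user_u64() is hypothetical): an 8-byte store
 * dispatches to __put_user_asm_64(), which bounces the value through
 * a stack temporary and gives each of the two stw instructions its
 * own __ex_table fixup entry. As with any __put_user(), the caller
 * must have validated uptr with access_ok() first.
 */
static inline int example_put_user_u64(u64 __user *uptr, u64 val)
{
	return __put_user(val, uptr);
}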

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	__get_user_size(x, (ptr), (size), __gu_err);		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	int __gu_err = -EFAULT;					\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
	if (access_ok(VERIFY_READ, __gu_ptr, size) && __gu_ptr)	\
		__get_user_size(x, __gu_ptr, size, __gu_err);	\
	__gu_err;						\
})

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	switch (size) {						\
	case 1:							\
		__get_user_asm_common((x), ptr, "ldb", retval);	\
		break;						\
	case 2:							\
		__get_user_asm_common((x), ptr, "ldh", retval);	\
		break;						\
	case 4:							\
		__get_user_asm_common((x), ptr, "ldw", retval);	\
		break;						\
	default:						\
		x = 0;						\
		(retval) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_asm_common(x, ptr, ins, err)			\
do {								\
	int errcode;						\
	asm volatile(						\
	"1:   " ins " %1, (%4,0)	\n"			\
	"       br    3f		\n"			\
	/* Fixup code */					\
	"2:     mov   %0, %2		\n"			\
	"       movi  %1, 0		\n"			\
	"       br    3f		\n"			\
	".section __ex_table, \"a\"	\n"			\
	".align   2			\n"			\
	".long    1b, 2b		\n"			\
	".previous			\n"			\
	"3:				\n"			\
	: "=r"(err), "=r"(x), "=r"(errcode)			\
	: "0"(0), "r"(ptr), "2"(-EFAULT)			\
	: "memory");						\
} while (0)
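
/*
 * Fault-semantics sketch (example_peek_user() is hypothetical): when
 * the ldb/ldh/ldw above faults, the fixup at label 2 both returns
 * -EFAULT and zeroes the destination, so a failed __get_user() never
 * leaves stale data behind. The caller is expected to have run
 * access_ok() beforehand.
 */
static inline int example_peek_user(const unsigned int __user *uptr,
				    unsigned int *out)
{
	unsigned int val;
	int err = __get_user(val, uptr);	/* val is 0 if this faults */

	*out = val;
	return err;
}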

extern int __get_user_bad(void);

#define __copy_user(to, from, n)			\
do {							\
	int w0, w1, w2, w3;				\
	asm volatile(					\
	"0:     cmpnei  %1, 0           \n"		\
	"       bf      8f              \n"		\
	"       mov     %3, %1          \n"		\
	"       or      %3, %2          \n"		\
	"       andi    %3, 3           \n"		\
	"       cmpnei  %3, 0           \n"		\
	"       bf      1f              \n"		\
	"       br      5f              \n"		\
	"1:     cmplti  %0, 16          \n" /* 4W */	\
	"       bt      3f              \n"		\
	"       ldw     %3, (%2, 0)     \n"		\
	"       ldw     %4, (%2, 4)     \n"		\
	"       ldw     %5, (%2, 8)     \n"		\
	"       ldw     %6, (%2, 12)    \n"		\
	"2:     stw     %3, (%1, 0)     \n"		\
	"9:     stw     %4, (%1, 4)     \n"		\
	"10:    stw     %5, (%1, 8)     \n"		\
	"11:    stw     %6, (%1, 12)    \n"		\
	"       addi    %2, 16          \n"		\
	"       addi    %1, 16          \n"		\
	"       subi    %0, 16          \n"		\
	"       br      1b              \n"		\
	"3:     cmplti  %0, 4           \n" /* 1W */	\
	"       bt      5f              \n"		\
	"       ldw     %3, (%2, 0)     \n"		\
	"4:     stw     %3, (%1, 0)     \n"		\
	"       addi    %2, 4           \n"		\
	"       addi    %1, 4           \n"		\
	"       subi    %0, 4           \n"		\
	"       br      3b              \n"		\
	"5:     cmpnei  %0, 0           \n" /* 1B */	\
	"       bf      8f              \n"		\
	"       ldb     %3, (%2, 0)     \n"		\
	"6:     stb     %3, (%1, 0)     \n"		\
	"       addi    %2,  1          \n"		\
	"       addi    %1,  1          \n"		\
	"       subi    %0,  1          \n"		\
	"       br      5b              \n"		\
	"7:     br      8f              \n"		\
	".section __ex_table, \"a\"     \n"		\
	".align   2                     \n"		\
	".long    2b, 7b                \n"		\
	".long    9b, 7b                \n"		\
	".long   10b, 7b                \n"		\
	".long   11b, 7b                \n"		\
	".long    4b, 7b                \n"		\
	".long    6b, 7b                \n"		\
	".previous                      \n"		\
	"8:                             \n"		\
	: "=r"(n), "=r"(to), "=r"(from), "=r"(w0),	\
	  "=r"(w1), "=r"(w2), "=r"(w3)			\
	: "0"(n), "1"(to), "2"(from)			\
	: "memory");					\
} while (0)
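
/*
 * Illustrative C model of the copy strategy the asm above implements
 * (compiled out; the real work must stay in asm so that faulting
 * stores are caught by the __ex_table entries and bail out at label 7
 * with the not-yet-copied byte count left in n): use a byte loop when
 * either pointer is misaligned, otherwise move 16 bytes per
 * iteration, then single words, then trailing bytes.
 */
#if 0
static unsigned long example_copy_model(char *to, const char *from,
					unsigned long n)
{
	if (((unsigned long)to | (unsigned long)from) & 3)
		goto bytes;				/* unaligned: 1B loop */
	for (; n >= 16; n -= 16, to += 16, from += 16)
		memcpy(to, from, 16);			/* 4W block */
	for (; n >= 4; n -= 4, to += 4, from += 4)
		memcpy(to, from, 4);			/* 1W block */
bytes:
	for (; n; n--)
		*to++ = *from++;			/* 1B loop */
	return n;	/* always 0 here; non-zero only on a fault */
}
#endif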

#define __copy_user_zeroing(to, from, n)		\
do {							\
	int tmp;					\
	int nsave;					\
	asm volatile(					\
	"0:     cmpnei  %1, 0           \n"		\
	"       bf      7f              \n"		\
	"       mov     %3, %1          \n"		\
	"       or      %3, %2          \n"		\
	"       andi    %3, 3           \n"		\
	"       cmpnei  %3, 0           \n"		\
	"       bf      1f              \n"		\
	"       br      5f              \n"		\
	"1:     cmplti  %0, 16          \n"		\
	"       bt      3f              \n"		\
	"2:     ldw     %3, (%2, 0)     \n"		\
	"10:    ldw     %4, (%2, 4)     \n"		\
	"       stw     %3, (%1, 0)     \n"		\
	"       stw     %4, (%1, 4)     \n"		\
	"11:    ldw     %3, (%2, 8)     \n"		\
	"12:    ldw     %4, (%2, 12)    \n"		\
	"       stw     %3, (%1, 8)     \n"		\
	"       stw     %4, (%1, 12)    \n"		\
	"       addi    %2, 16          \n"		\
	"       addi    %1, 16          \n"		\
	"       subi    %0, 16          \n"		\
	"       br      1b              \n"		\
	"3:     cmplti  %0, 4           \n"		\
	"       bt      5f              \n"		\
	"4:     ldw     %3, (%2, 0)     \n"		\
	"       stw     %3, (%1, 0)     \n"		\
	"       addi    %2, 4           \n"		\
	"       addi    %1, 4           \n"		\
	"       subi    %0, 4           \n"		\
	"       br      3b              \n"		\
	"5:     cmpnei  %0, 0           \n"		\
	"       bf      7f              \n"		\
	"6:     ldb     %3, (%2, 0)     \n"		\
	"       stb     %3, (%1, 0)     \n"		\
	"       addi    %2,  1          \n"		\
	"       addi    %1,  1          \n"		\
	"       subi    %0,  1          \n"		\
	"       br      5b              \n"		\
	"8:     mov     %3, %0          \n"		\
	"       movi    %4, 0           \n"		\
	"9:     stb     %4, (%1, 0)     \n"		\
	"       addi    %1, 1           \n"		\
	"       subi    %3, 1           \n"		\
	"       cmpnei  %3, 0           \n"		\
	"       bt      9b              \n"		\
	"       br      7f              \n"		\
	".section __ex_table, \"a\"     \n"		\
	".align   2                     \n"		\
	".long    2b, 8b                \n"		\
	".long   10b, 8b                \n"		\
	".long   11b, 8b                \n"		\
	".long   12b, 8b                \n"		\
	".long    4b, 8b                \n"		\
	".long    6b, 8b                \n"		\
	".previous                      \n"		\
	"7:                             \n"		\
	: "=r"(n), "=r"(to), "=r"(from), "=r"(nsave),	\
	  "=r"(tmp)					\
	: "0"(n), "1"(to), "2"(from)			\
	: "memory");					\
} while (0)
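
/*
 * Sketch of what distinguishes the _zeroing variant (compiled out,
 * illustrative only): when one of the loads at labels 2/10/11/12/4/6
 * faults, the fixup at label 8 zero-fills the remaining destination
 * bytes instead of leaving the kernel buffer partially uninitialized.
 */
#if 0
static void example_zero_tail(char *to, unsigned long residue)
{
	while (residue--)
		*to++ = 0;	/* mirrors the stb loop at label 9 */
}
#endif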

unsigned long raw_copy_from_user(void *to, const void __user *from,
				 unsigned long n);
unsigned long raw_copy_to_user(void __user *to, const void *from,
			       unsigned long n);

unsigned long clear_user(void __user *to, unsigned long n);
unsigned long __clear_user(void __user *to, unsigned long n);

long strncpy_from_user(char *dst, const char __user *src, long count);
long __strncpy_from_user(char *dst, const char __user *src, long count);
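
/*
 * Usage sketch (example_clear_user_buf() is hypothetical): clear_user()
 * returns the number of bytes it could NOT zero, so a non-zero result
 * maps to -EFAULT.
 */
static inline int example_clear_user_buf(void __user *ubuf, unsigned long len)
{
	return clear_user(ubuf, len) ? -EFAULT : 0;
}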

/*
 * Return the size of a string (including the terminating NUL).
 *
 * Return 0 on exception, or a value greater than n if the string
 * is too long.
 */
long strnlen_user(const char __user *src, long n);

#define strlen_user(str) strnlen_user(str, 32767)
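
/*
 * Usage sketch (example_user_string_len() is hypothetical): all three
 * documented strnlen_user() outcomes need checking, since the count
 * includes the terminating NUL.
 */
static inline long example_user_string_len(const char __user *ustr, long limit)
{
	long len = strnlen_user(ustr, limit);

	if (len == 0)
		return -EFAULT;		/* faulted while scanning */
	if (len > limit)
		return -EINVAL;		/* no NUL within the limit */
	return len - 1;			/* length excluding the NUL */
}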

struct exception_table_entry {
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);
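
/*
 * Minimal sketch of the consumer side (compiled out; the real
 * fixup_exception() lives in the arch fault-handling code, and
 * search_exception_tables()/instruction_pointer() are the generic
 * kernel helpers): look the faulting pc up in __ex_table and, on a
 * hit, resume at the recorded fixup address.
 */
#if 0
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->nextinsn;	/* jump to the fixup code */
		return 1;
	}
	return 0;
}
#endif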

#endif /* __ASM_CSKY_UACCESS_H */