/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#define __UA_LIMIT	0x80000000UL

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

#define __UA_LIMIT	(- TASK_SIZE)

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
({									\
	unsigned long __addr = (unsigned long) (addr);			\
	unsigned long __size = size;					\
	unsigned long __mask = mask;					\
	unsigned long __ok;						\
									\
	__chk_user_ptr(addr);						\
	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
		__ua_size(__size)));					\
	__ok == 0;							\
})
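
/*
 * Worked example of the mask arithmetic above (hypothetical values,
 * assuming a 32-bit kernel where the mask is 0x80000000):
 *
 *	addr = 0x7fff0000, size = 0x100:
 *		0x80000000 & (0x7fff0000 | 0x7fff0100 | 0x100) == 0, ok
 *	addr = 0x7fffffff, size = 0x10:
 *		addr + size = 0x8000000f has the high bit set, rejected
 */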

#define access_ok(type, addr, size)					\
	likely(__access_ok((addr), (size), __access_mask))
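
/*
 * Example use (an illustrative sketch, not part of this header; uaddr
 * and struct foo_arg are hypothetical):
 *
 *	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(struct foo_arg)))
 *		return -EFAULT;
 */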

/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr)	\
	__put_user_check((x), (ptr), sizeof(*(ptr)))
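
/*
 * Example use (sketch only; "status" and "argp" are hypothetical
 * driver-side names):
 *
 *	int status = 0;
 *	if (put_user(status, (int __user *)argp))
 *		return -EFAULT;
 */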

/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
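
/*
 * Example use (sketch; "val" and "argp" are hypothetical):
 *
 *	int val;
 *	if (get_user(val, (int __user *)argp))
 *		return -EFAULT;
 *
 * On failure val has been zeroed, as documented above.
 */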

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
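
/*
 * Typical pattern for the unchecked variants (a sketch; uptr, n and
 * consume() are hypothetical): validate the whole range once, then use
 * __get_user() in the loop so the limit check is not repeated:
 *
 *	u32 v;
 *	long i;
 *
 *	if (!access_ok(VERIFY_READ, uptr, n * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		if (__get_user(v, uptr + i))
 *			return -EFAULT;
 *		consume(v);
 *	}
 */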

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_asm(val, "lh", ptr); break;			\
	case 4: __get_user_asm(val, "lw", ptr); break;			\
	case 8: __GET_USER_DW(val, ptr); break;				\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	__chk_user_ptr(ptr);						\
	__get_user_common((x), size, ptr);				\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
		__get_user_common((x), size, __gu_ptr);			\
									\
	__gu_err;							\
})

#define __get_user_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_asm_ll32(val, addr)					\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	lw	%1, (%3)				\n"	\
	"2:	lw	%D1, 4(%3)				\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__chk_user_ptr(ptr);						\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_asm("sh", ptr); break;			\
	case 4: __put_user_asm("sw", ptr); break;			\
	case 8: __PUT_USER_DW(ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_asm("sh", __pu_addr); break;		\
		case 4: __put_user_asm("sw", __pu_addr); break;		\
		case 8: __PUT_USER_DW(__pu_addr); break;		\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})

#define __put_user_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_asm_ll32(ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_asm_ll32	\n"	\
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);

/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr)	\
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
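
/*
 * Example use (sketch; "hdr" is a hypothetical user pointer to a packed
 * on-wire header whose fields are only byte-aligned):
 *
 *	u32 seq;
 *	if (get_user_unaligned(seq, &hdr->seq))
 *		return -EFAULT;
 */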

/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)			\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size)			\
({									\
	int __gu_err;							\
									\
	__get_user_unaligned_common((x), size, ptr);			\
	__gu_err;							\
})

#define __get_user_unaligned_check(x,ptr,size)				\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
		__get_user_unaligned_common((x), size, __gu_ptr);	\
									\
	__gu_err;							\
})

#define __get_user_unaligned_asm(val, insn, addr)			\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)			\
{									\
	unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	ulw	%1, (%3)				\n"	\
	"2:	ulw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_nocheck(x,ptr,size)			\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
	__pu_err;							\
})

#define __put_user_unaligned_check(x,ptr,size)				\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
		case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
		case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break;	\
		default: __put_user_unaligned_unknown(); break;		\
		}							\
	}								\
	__pu_err;							\
})

#define __put_user_unaligned_asm(insn, ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_unaligned_asm_ll32(ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unaligned_unknown(void);

/*
 * We're generating jumps to subroutines which will be outside the range
 * of jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})
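
/*
 * Example use (sketch; buf, kbuf and len are hypothetical): the range
 * has already been validated, so the unchecked variant can be used.
 *
 *	if (!access_ok(VERIFY_WRITE, buf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(buf, kbuf, len))
 *		return -EFAULT;
 */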

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)				\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})

#define __copy_from_user_inatomic(to, from, n)				\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from,	\
	                                            __cu_len);		\
	__cu_len;							\
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {		\
		might_fault();						\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
		                                 __cu_len);		\
	}								\
	__cu_len;							\
})
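
/*
 * Example use (sketch; a read()-style handler with hypothetical names):
 *
 *	static ssize_t foo_read(struct file *file, char __user *buf,
 *				size_t len, loff_t *ppos)
 *	{
 *		if (copy_to_user(buf, foo_data, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */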

#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
	                                   __cu_len);			\
	__cu_len;							\
})
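
/*
 * Example use (sketch; ubuf and karg are hypothetical).  Note that on a
 * partial fault the remainder of karg is zero-filled, as documented above:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(karg)))
 *		return -EFAULT;
 *	if (__copy_from_user(&karg, ubuf, sizeof(karg)))
 *		return -EFAULT;
 */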

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {		\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
		                                   __cu_len);		\
	}								\
	__cu_len;							\
})
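
/*
 * Example use (sketch; a write()-style handler with hypothetical names):
 *
 *	static ssize_t foo_write(struct file *file, const char __user *buf,
 *				 size_t len, loff_t *ppos)
 *	{
 *		if (copy_from_user(foo_data, buf, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */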

#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
	                                   __cu_len);			\
	__cu_len;							\
})

#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
	           access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {	\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
		                                   __cu_len);		\
	}								\
	__cu_len;							\
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
					__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
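
/*
 * Example use (sketch; ubuf, len and filled are hypothetical): zero the
 * tail of a user buffer that was only partially filled.
 *
 *	if (clear_user(ubuf + filled, len - filled))
 *		return -EFAULT;
 */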

/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
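
/*
 * Example use (sketch; uname is a hypothetical user pointer):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 *
 * A negative return means a fault; len == sizeof(name) means no NUL was
 * found within the buffer.
 */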

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n:   The maximum number of bytes to look at.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
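
/*
 * Example use (sketch; ustr is a hypothetical user pointer): size a user
 * string, with an upper bound, before copying it.
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (!len)
 *		return -EFAULT;
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;
 */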

struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */