xref: /openbmc/linux/arch/mips/include/asm/uaccess.h (revision a8da474e)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13 
14 #include <linux/kernel.h>
15 #include <linux/errno.h>
16 #include <linux/thread_info.h>
17 #include <asm/asm-eva.h>
18 
19 /*
20  * The fs value determines whether argument validity checking should be
21  * performed or not.  If get_fs() == USER_DS, checking is performed; with
22  * get_fs() == KERNEL_DS, checking is bypassed.
23  *
24  * For historical reasons, these macros are grossly misnamed.
25  */
26 #ifdef CONFIG_32BIT
27 
28 #ifdef CONFIG_KVM_GUEST
29 #define __UA_LIMIT 0x40000000UL
30 #else
31 #define __UA_LIMIT 0x80000000UL
32 #endif
33 
34 #define __UA_ADDR	".word"
35 #define __UA_LA		"la"
36 #define __UA_ADDU	"addu"
37 #define __UA_t0		"$8"
38 #define __UA_t1		"$9"
39 
40 #endif /* CONFIG_32BIT */
41 
42 #ifdef CONFIG_64BIT
43 
44 extern u64 __ua_limit;
45 
46 #define __UA_LIMIT	__ua_limit
47 
48 #define __UA_ADDR	".dword"
49 #define __UA_LA		"dla"
50 #define __UA_ADDU	"daddu"
51 #define __UA_t0		"$12"
52 #define __UA_t1		"$13"
53 
54 #endif /* CONFIG_64BIT */
55 
56 /*
57  * USER_DS is a bitmask of the bits that may not be set in a valid
58  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
59  * the arithmetic we're doing only works if the limit is a power of two, so
60  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
61  * address in this range it's the process's problem, not ours :-)
62  */
63 
64 #ifdef CONFIG_KVM_GUEST
65 #define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
66 #define USER_DS		((mm_segment_t) { 0xC0000000UL })
67 #else
68 #define KERNEL_DS	((mm_segment_t) { 0UL })
69 #define USER_DS		((mm_segment_t) { __UA_LIMIT })
70 #endif
71 
72 #define VERIFY_READ    0
73 #define VERIFY_WRITE   1
74 
75 #define get_ds()	(KERNEL_DS)
76 #define get_fs()	(current_thread_info()->addr_limit)
77 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
78 
79 #define segment_eq(a, b)	((a).seg == (b).seg)
80 
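/*
 * Illustrative sketch of the classic pattern this addr_limit machinery
 * enables: temporarily widening the limit so the user accessors may be
 * pointed at kernel memory.  The surrounding function and the kernel
 * buffer "kbuf" are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... use copy_from_user()/get_user() etc. on kbuf ...
 *	set_fs(old_fs);
 */
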
81 /*
82  * eva_kernel_access() - determine whether to use kernel memory access on an EVA system
83  *
84  * Determines whether memory accesses should be performed to kernel memory
85  * on a system using Extended Virtual Addressing (EVA).
86  *
87  * Return: true if a kernel memory access on an EVA system, else false.
88  */
89 static inline bool eva_kernel_access(void)
90 {
91 	if (!config_enabled(CONFIG_EVA))
92 		return false;
93 
94 	return segment_eq(get_fs(), get_ds());
95 }
96 
97 /*
98  * Is an address valid? This does a straightforward calculation rather
99  * than tests.
100  *
101  * Address valid if:
102  *  - "addr" doesn't have any high-bits set
103  *  - AND "size" doesn't have any high-bits set
104  *  - AND "addr+size" doesn't have any high-bits set
105  *  - OR we are in kernel mode.
106  *
107  * __ua_size() is a trick to avoid runtime checking of positive constant
108  * sizes; for those we already know at compile time that the size is ok.
109  */
110 #define __ua_size(size)							\
111 	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
112 
113 /*
114  * access_ok: - Checks if a user space pointer is valid
115  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
116  *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
117  *	  to write to a block, it is always safe to read from it.
118  * @addr: User space pointer to start of block to check
119  * @size: Size of block to check
120  *
121  * Context: User context only. This function may sleep if pagefaults are
122  *          enabled.
123  *
124  * Checks if a pointer to a block of memory in user space is valid.
125  *
126  * Returns true (nonzero) if the memory block may be valid, false (zero)
127  * if it is definitely invalid.
128  *
129  * Note that, depending on architecture, this function probably just
130  * checks that the pointer is in the user space range - after calling
131  * this function, memory access functions may still return -EFAULT.
132  */
133 
134 #define __access_mask get_fs().seg
135 
136 #define __access_ok(addr, size, mask)					\
137 ({									\
138 	unsigned long __addr = (unsigned long) (addr);			\
139 	unsigned long __size = size;					\
140 	unsigned long __mask = mask;					\
141 	unsigned long __ok;						\
142 									\
143 	__chk_user_ptr(addr);						\
144 	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
145 		__ua_size(__size)));					\
146 	__ok == 0;							\
147 })
148 
149 #define access_ok(type, addr, size)					\
150 	likely(__access_ok((addr), (size), __access_mask))
151 
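/*
 * Illustrative sketch: validating a user-supplied buffer before touching
 * it.  "ubuf" and "len" are hypothetical arguments handed in by a syscall.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	... ubuf/len may now be passed to the unchecked accessors below ...
 */
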
152 /*
153  * put_user: - Write a simple value into user space.
154  * @x:	 Value to copy to user space.
155  * @ptr: Destination address, in user space.
156  *
157  * Context: User context only. This function may sleep if pagefaults are
158  *          enabled.
159  *
160  * This macro copies a single simple value from kernel space to user
161  * space.  It supports simple types like char and int, but not larger
162  * data types like structures or arrays.
163  *
164  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
165  * to the result of dereferencing @ptr.
166  *
167  * Returns zero on success, or -EFAULT on error.
168  */
169 #define put_user(x,ptr) \
170 	__put_user_check((x), (ptr), sizeof(*(ptr)))
171 
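/*
 * Illustrative sketch: returning a single result through a user-supplied
 * pointer, e.g. from an ioctl handler.  "uptr" is a hypothetical
 * "int __user *" argument.
 *
 *	int val = 42;
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *	return 0;
 */
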
172 /*
173  * get_user: - Get a simple variable from user space.
174  * @x:	 Variable to store result.
175  * @ptr: Source address, in user space.
176  *
177  * Context: User context only. This function may sleep if pagefaults are
178  *          enabled.
179  *
180  * This macro copies a single simple variable from user space to kernel
181  * space.  It supports simple types like char and int, but not larger
182  * data types like structures or arrays.
183  *
184  * @ptr must have pointer-to-simple-variable type, and the result of
185  * dereferencing @ptr must be assignable to @x without a cast.
186  *
187  * Returns zero on success, or -EFAULT on error.
188  * On error, the variable @x is set to zero.
189  */
190 #define get_user(x,ptr) \
191 	__get_user_check((x), (ptr), sizeof(*(ptr)))
192 
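/*
 * Illustrative sketch: fetching a flags word from userspace before acting
 * on it.  "uflags" is a hypothetical "unsigned int __user *" argument.
 *
 *	unsigned int flags;
 *
 *	if (get_user(flags, uflags))
 *		return -EFAULT;
 */
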
193 /*
194  * __put_user: - Write a simple value into user space, with less checking.
195  * @x:	 Value to copy to user space.
196  * @ptr: Destination address, in user space.
197  *
198  * Context: User context only. This function may sleep if pagefaults are
199  *          enabled.
200  *
201  * This macro copies a single simple value from kernel space to user
202  * space.  It supports simple types like char and int, but not larger
203  * data types like structures or arrays.
204  *
205  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
206  * to the result of dereferencing @ptr.
207  *
208  * Caller must check the pointer with access_ok() before calling this
209  * function.
210  *
211  * Returns zero on success, or -EFAULT on error.
212  */
213 #define __put_user(x,ptr) \
214 	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
215 
216 /*
217  * __get_user: - Get a simple variable from user space, with less checking.
218  * @x:	 Variable to store result.
219  * @ptr: Source address, in user space.
220  *
221  * Context: User context only. This function may sleep if pagefaults are
222  *          enabled.
223  *
224  * This macro copies a single simple variable from user space to kernel
225  * space.  It supports simple types like char and int, but not larger
226  * data types like structures or arrays.
227  *
228  * @ptr must have pointer-to-simple-variable type, and the result of
229  * dereferencing @ptr must be assignable to @x without a cast.
230  *
231  * Caller must check the pointer with access_ok() before calling this
232  * function.
233  *
234  * Returns zero on success, or -EFAULT on error.
235  * On error, the variable @x is set to zero.
236  */
237 #define __get_user(x,ptr) \
238 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
239 
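/*
 * Illustrative sketch: once a range has passed access_ok(), the unchecked
 * __get_user()/__put_user() variants avoid re-validating every element.
 * "uarr" (an "int __user *") and "n" are hypothetical.
 *
 *	int i, v;
 *
 *	if (!access_ok(VERIFY_WRITE, uarr, n * sizeof(*uarr)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		if (__get_user(v, uarr + i) || __put_user(v + 1, uarr + i))
 *			return -EFAULT;
 *	}
 */
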
240 struct __large_struct { unsigned long buf[100]; };
241 #define __m(x) (*(struct __large_struct __user *)(x))
242 
243 /*
244  * Yuck.  We need two variants, one for 64bit operation and one
245  * for 32 bit mode and old iron.
246  */
247 #ifndef CONFIG_EVA
248 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
249 #else
250 /*
251  * Kernel specific functions for EVA. We need to use normal load instructions
252  * to read data from kernel when operating in EVA mode. We use these macros to
253  * avoid redefining __get_user_asm for EVA.
254  */
255 #undef _loadd
256 #undef _loadw
257 #undef _loadh
258 #undef _loadb
259 #ifdef CONFIG_32BIT
260 #define _loadd			_loadw
261 #else
262 #define _loadd(reg, addr)	"ld " reg ", " addr
263 #endif
264 #define _loadw(reg, addr)	"lw " reg ", " addr
265 #define _loadh(reg, addr)	"lh " reg ", " addr
266 #define _loadb(reg, addr)	"lb " reg ", " addr
267 
268 #define __get_kernel_common(val, size, ptr)				\
269 do {									\
270 	switch (size) {							\
271 	case 1: __get_data_asm(val, _loadb, ptr); break;		\
272 	case 2: __get_data_asm(val, _loadh, ptr); break;		\
273 	case 4: __get_data_asm(val, _loadw, ptr); break;		\
274 	case 8: __GET_DW(val, _loadd, ptr); break;			\
275 	default: __get_user_unknown(); break;				\
276 	}								\
277 } while (0)
278 #endif
279 
280 #ifdef CONFIG_32BIT
281 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
282 #endif
283 #ifdef CONFIG_64BIT
284 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
285 #endif
286 
287 extern void __get_user_unknown(void);
288 
289 #define __get_user_common(val, size, ptr)				\
290 do {									\
291 	switch (size) {							\
292 	case 1: __get_data_asm(val, user_lb, ptr); break;		\
293 	case 2: __get_data_asm(val, user_lh, ptr); break;		\
294 	case 4: __get_data_asm(val, user_lw, ptr); break;		\
295 	case 8: __GET_DW(val, user_ld, ptr); break;			\
296 	default: __get_user_unknown(); break;				\
297 	}								\
298 } while (0)
299 
300 #define __get_user_nocheck(x, ptr, size)				\
301 ({									\
302 	int __gu_err;							\
303 									\
304 	if (eva_kernel_access()) {					\
305 		__get_kernel_common((x), size, ptr);			\
306 	} else {							\
307 		__chk_user_ptr(ptr);					\
308 		__get_user_common((x), size, ptr);			\
309 	}								\
310 	__gu_err;							\
311 })
312 
313 #define __get_user_check(x, ptr, size)					\
314 ({									\
315 	int __gu_err = -EFAULT;						\
316 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
317 									\
318 	might_fault();							\
319 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {		\
320 		if (eva_kernel_access())				\
321 			__get_kernel_common((x), size, __gu_ptr);	\
322 		else							\
323 			__get_user_common((x), size, __gu_ptr);		\
324 	} else								\
325 		(x) = 0;						\
326 									\
327 	__gu_err;							\
328 })
329 
330 #define __get_data_asm(val, insn, addr)					\
331 {									\
332 	long __gu_tmp;							\
333 									\
334 	__asm__ __volatile__(						\
335 	"1:	"insn("%1", "%3")"				\n"	\
336 	"2:							\n"	\
337 	"	.insn						\n"	\
338 	"	.section .fixup,\"ax\"				\n"	\
339 	"3:	li	%0, %4					\n"	\
340 	"	move	%1, $0					\n"	\
341 	"	j	2b					\n"	\
342 	"	.previous					\n"	\
343 	"	.section __ex_table,\"a\"			\n"	\
344 	"	"__UA_ADDR "\t1b, 3b				\n"	\
345 	"	.previous					\n"	\
346 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
347 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
348 									\
349 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
350 }
351 
352 /*
353  * Get a long long 64 using 32 bit registers.
354  */
355 #define __get_data_asm_ll32(val, insn, addr)				\
356 {									\
357 	union {								\
358 		unsigned long long	l;				\
359 		__typeof__(*(addr))	t;				\
360 	} __gu_tmp;							\
361 									\
362 	__asm__ __volatile__(						\
363 	"1:	" insn("%1", "(%3)")"				\n"	\
364 	"2:	" insn("%D1", "4(%3)")"				\n"	\
365 	"3:							\n"	\
366 	"	.insn						\n"	\
367 	"	.section	.fixup,\"ax\"			\n"	\
368 	"4:	li	%0, %4					\n"	\
369 	"	move	%1, $0					\n"	\
370 	"	move	%D1, $0					\n"	\
371 	"	j	3b					\n"	\
372 	"	.previous					\n"	\
373 	"	.section	__ex_table,\"a\"		\n"	\
374 	"	" __UA_ADDR "	1b, 4b				\n"	\
375 	"	" __UA_ADDR "	2b, 4b				\n"	\
376 	"	.previous					\n"	\
377 	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
378 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
379 									\
380 	(val) = __gu_tmp.t;						\
381 }
382 
383 #ifndef CONFIG_EVA
384 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
385 #else
386 /*
387  * Kernel specific functions for EVA. We need to use normal store instructions
388  * to write data to kernel memory when operating in EVA mode. We use these
389  * macros to avoid redefining __put_data_asm for EVA.
390  */
391 #undef _stored
392 #undef _storew
393 #undef _storeh
394 #undef _storeb
395 #ifdef CONFIG_32BIT
396 #define _stored			_storew
397 #else
398 #define _stored(reg, addr)	"sd " reg ", " addr
399 #endif
400 
401 #define _storew(reg, addr)	"sw " reg ", " addr
402 #define _storeh(reg, addr)	"sh " reg ", " addr
403 #define _storeb(reg, addr)	"sb " reg ", " addr
404 
405 #define __put_kernel_common(ptr, size)					\
406 do {									\
407 	switch (size) {							\
408 	case 1: __put_data_asm(_storeb, ptr); break;			\
409 	case 2: __put_data_asm(_storeh, ptr); break;			\
410 	case 4: __put_data_asm(_storew, ptr); break;			\
411 	case 8: __PUT_DW(_stored, ptr); break;				\
412 	default: __put_user_unknown(); break;				\
413 	}								\
414 } while(0)
415 #endif
416 
417 /*
418  * Yuck.  We need two variants, one for 64bit operation and one
419  * for 32 bit mode and old iron.
420  */
421 #ifdef CONFIG_32BIT
422 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
423 #endif
424 #ifdef CONFIG_64BIT
425 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
426 #endif
427 
428 #define __put_user_common(ptr, size)					\
429 do {									\
430 	switch (size) {							\
431 	case 1: __put_data_asm(user_sb, ptr); break;			\
432 	case 2: __put_data_asm(user_sh, ptr); break;			\
433 	case 4: __put_data_asm(user_sw, ptr); break;			\
434 	case 8: __PUT_DW(user_sd, ptr); break;				\
435 	default: __put_user_unknown(); break;				\
436 	}								\
437 } while (0)
438 
439 #define __put_user_nocheck(x, ptr, size)				\
440 ({									\
441 	__typeof__(*(ptr)) __pu_val;					\
442 	int __pu_err = 0;						\
443 									\
444 	__pu_val = (x);							\
445 	if (eva_kernel_access()) {					\
446 		__put_kernel_common(ptr, size);				\
447 	} else {							\
448 		__chk_user_ptr(ptr);					\
449 		__put_user_common(ptr, size);				\
450 	}								\
451 	__pu_err;							\
452 })
453 
454 #define __put_user_check(x, ptr, size)					\
455 ({									\
456 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
457 	__typeof__(*(ptr)) __pu_val = (x);				\
458 	int __pu_err = -EFAULT;						\
459 									\
460 	might_fault();							\
461 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
462 		if (eva_kernel_access())				\
463 			__put_kernel_common(__pu_addr, size);		\
464 		else							\
465 			__put_user_common(__pu_addr, size);		\
466 	}								\
467 									\
468 	__pu_err;							\
469 })
470 
471 #define __put_data_asm(insn, ptr)					\
472 {									\
473 	__asm__ __volatile__(						\
474 	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
475 	"2:							\n"	\
476 	"	.insn						\n"	\
477 	"	.section	.fixup,\"ax\"			\n"	\
478 	"3:	li	%0, %4					\n"	\
479 	"	j	2b					\n"	\
480 	"	.previous					\n"	\
481 	"	.section	__ex_table,\"a\"		\n"	\
482 	"	" __UA_ADDR "	1b, 3b				\n"	\
483 	"	.previous					\n"	\
484 	: "=r" (__pu_err)						\
485 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
486 	  "i" (-EFAULT));						\
487 }
488 
489 #define __put_data_asm_ll32(insn, ptr)					\
490 {									\
491 	__asm__ __volatile__(						\
492 	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
493 	"2:	"insn("%D2", "4(%3)")"				\n"	\
494 	"3:							\n"	\
495 	"	.insn						\n"	\
496 	"	.section	.fixup,\"ax\"			\n"	\
497 	"4:	li	%0, %4					\n"	\
498 	"	j	3b					\n"	\
499 	"	.previous					\n"	\
500 	"	.section	__ex_table,\"a\"		\n"	\
501 	"	" __UA_ADDR "	1b, 4b				\n"	\
502 	"	" __UA_ADDR "	2b, 4b				\n"	\
503 	"	.previous"						\
504 	: "=r" (__pu_err)						\
505 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
506 	  "i" (-EFAULT));						\
507 }
508 
509 extern void __put_user_unknown(void);
510 
511 /*
512  * ul{h,w,d} are assembler macros and there are no equivalent macros for EVA.
513  * EVA unaligned access is handled in the ADE exception handler.
514  */
515 #ifndef CONFIG_EVA
516 /*
517  * put_user_unaligned: - Write a simple value into user space.
518  * @x:	 Value to copy to user space.
519  * @ptr: Destination address, in user space.
520  *
521  * Context: User context only. This function may sleep if pagefaults are
522  *          enabled.
523  *
524  * This macro copies a single simple value from kernel space to user
525  * space.  It supports simple types like char and int, but not larger
526  * data types like structures or arrays.
527  *
528  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
529  * to the result of dereferencing @ptr.
530  *
531  * Returns zero on success, or -EFAULT on error.
532  */
533 #define put_user_unaligned(x,ptr)	\
534 	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
535 
536 /*
537  * get_user_unaligned: - Get a simple variable from user space.
538  * @x:	 Variable to store result.
539  * @ptr: Source address, in user space.
540  *
541  * Context: User context only. This function may sleep if pagefaults are
542  *          enabled.
543  *
544  * This macro copies a single simple variable from user space to kernel
545  * space.  It supports simple types like char and int, but not larger
546  * data types like structures or arrays.
547  *
548  * @ptr must have pointer-to-simple-variable type, and the result of
549  * dereferencing @ptr must be assignable to @x without a cast.
550  *
551  * Returns zero on success, or -EFAULT on error.
552  * On error, the variable @x is set to zero.
553  */
554 #define get_user_unaligned(x,ptr) \
555 	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
556 
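/*
 * Illustrative sketch: the unaligned accessors are for user pointers that
 * may not be naturally aligned, e.g. a 32-bit field of a packed wire-format
 * header.  "hdr" and its "seq" member are hypothetical.
 *
 *	u32 seq;
 *
 *	if (get_user_unaligned(seq, &hdr->seq))
 *		return -EFAULT;
 *	if (put_user_unaligned(seq + 1, &hdr->seq))
 *		return -EFAULT;
 */
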
557 /*
558  * __put_user_unaligned: - Write a simple value into user space, with less checking.
559  * @x:	 Value to copy to user space.
560  * @ptr: Destination address, in user space.
561  *
562  * Context: User context only. This function may sleep if pagefaults are
563  *          enabled.
564  *
565  * This macro copies a single simple value from kernel space to user
566  * space.  It supports simple types like char and int, but not larger
567  * data types like structures or arrays.
568  *
569  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
570  * to the result of dereferencing @ptr.
571  *
572  * Caller must check the pointer with access_ok() before calling this
573  * function.
574  *
575  * Returns zero on success, or -EFAULT on error.
576  */
577 #define __put_user_unaligned(x,ptr) \
578 	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
579 
580 /*
581  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
582  * @x:	 Variable to store result.
583  * @ptr: Source address, in user space.
584  *
585  * Context: User context only. This function may sleep if pagefaults are
586  *          enabled.
587  *
588  * This macro copies a single simple variable from user space to kernel
589  * space.  It supports simple types like char and int, but not larger
590  * data types like structures or arrays.
591  *
592  * @ptr must have pointer-to-simple-variable type, and the result of
593  * dereferencing @ptr must be assignable to @x without a cast.
594  *
595  * Caller must check the pointer with access_ok() before calling this
596  * function.
597  *
598  * Returns zero on success, or -EFAULT on error.
599  * On error, the variable @x is set to zero.
600  */
601 #define __get_user_unaligned(x,ptr) \
602 	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
603 
604 /*
605  * Yuck.  We need two variants, one for 64bit operation and one
606  * for 32 bit mode and old iron.
607  */
608 #ifdef CONFIG_32BIT
609 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
610 	__get_user_unaligned_asm_ll32(val, ptr)
611 #endif
612 #ifdef CONFIG_64BIT
613 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
614 	__get_user_unaligned_asm(val, "uld", ptr)
615 #endif
616 
617 extern void __get_user_unaligned_unknown(void);
618 
619 #define __get_user_unaligned_common(val, size, ptr)			\
620 do {									\
621 	switch (size) {							\
622 	case 1: __get_user_unaligned_asm(val, "lb", ptr); break;	\
623 	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
624 	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
625 	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
626 	default: __get_user_unaligned_unknown(); break;			\
627 	}								\
628 } while (0)
629 
630 #define __get_user_unaligned_nocheck(x,ptr,size)			\
631 ({									\
632 	int __gu_err;							\
633 									\
634 	__get_user_unaligned_common((x), size, ptr);			\
635 	__gu_err;							\
636 })
637 
638 #define __get_user_unaligned_check(x,ptr,size)				\
639 ({									\
640 	int __gu_err = -EFAULT;						\
641 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
642 									\
643 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
644 		__get_user_unaligned_common((x), size, __gu_ptr);	\
645 									\
646 	__gu_err;							\
647 })
648 
649 #define __get_user_unaligned_asm(val, insn, addr)			\
650 {									\
651 	long __gu_tmp;							\
652 									\
653 	__asm__ __volatile__(						\
654 	"1:	" insn "	%1, %3				\n"	\
655 	"2:							\n"	\
656 	"	.insn						\n"	\
657 	"	.section .fixup,\"ax\"				\n"	\
658 	"3:	li	%0, %4					\n"	\
659 	"	move	%1, $0					\n"	\
660 	"	j	2b					\n"	\
661 	"	.previous					\n"	\
662 	"	.section __ex_table,\"a\"			\n"	\
663 	"	"__UA_ADDR "\t1b, 3b				\n"	\
664 	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
665 	"	.previous					\n"	\
666 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
667 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
668 									\
669 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
670 }
671 
672 /*
673  * Get a long long 64 using 32 bit registers.
674  */
675 #define __get_user_unaligned_asm_ll32(val, addr)			\
676 {									\
677 	unsigned long long __gu_tmp;					\
678 									\
679 	__asm__ __volatile__(						\
680 	"1:	ulw	%1, (%3)				\n"	\
681 	"2:	ulw	%D1, 4(%3)				\n"	\
682 	"	move	%0, $0					\n"	\
683 	"3:							\n"	\
684 	"	.insn						\n"	\
685 	"	.section	.fixup,\"ax\"			\n"	\
686 	"4:	li	%0, %4					\n"	\
687 	"	move	%1, $0					\n"	\
688 	"	move	%D1, $0					\n"	\
689 	"	j	3b					\n"	\
690 	"	.previous					\n"	\
691 	"	.section	__ex_table,\"a\"		\n"	\
692 	"	" __UA_ADDR "	1b, 4b				\n"	\
693 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
694 	"	" __UA_ADDR "	2b, 4b				\n"	\
695 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
696 	"	.previous					\n"	\
697 	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
698 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
699 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
700 }
701 
702 /*
703  * Yuck.  We need two variants, one for 64bit operation and one
704  * for 32 bit mode and old iron.
705  */
706 #ifdef CONFIG_32BIT
707 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
708 #endif
709 #ifdef CONFIG_64BIT
710 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
711 #endif
712 
713 #define __put_user_unaligned_common(ptr, size)				\
714 do {									\
715 	switch (size) {							\
716 	case 1: __put_user_unaligned_asm("sb", ptr); break;		\
717 	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
718 	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
719 	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
720 	default: __put_user_unaligned_unknown(); break;			\
721 	}								\
} while (0)
722 
723 #define __put_user_unaligned_nocheck(x,ptr,size)			\
724 ({									\
725 	__typeof__(*(ptr)) __pu_val;					\
726 	int __pu_err = 0;						\
727 									\
728 	__pu_val = (x);							\
729 	__put_user_unaligned_common(ptr, size);				\
730 	__pu_err;							\
731 })
732 
733 #define __put_user_unaligned_check(x,ptr,size)				\
734 ({									\
735 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
736 	__typeof__(*(ptr)) __pu_val = (x);				\
737 	int __pu_err = -EFAULT;						\
738 									\
739 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))		\
740 		__put_user_unaligned_common(__pu_addr, size);		\
741 									\
742 	__pu_err;							\
743 })
744 
745 #define __put_user_unaligned_asm(insn, ptr)				\
746 {									\
747 	__asm__ __volatile__(						\
748 	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
749 	"2:							\n"	\
750 	"	.insn						\n"	\
751 	"	.section	.fixup,\"ax\"			\n"	\
752 	"3:	li	%0, %4					\n"	\
753 	"	j	2b					\n"	\
754 	"	.previous					\n"	\
755 	"	.section	__ex_table,\"a\"		\n"	\
756 	"	" __UA_ADDR "	1b, 3b				\n"	\
757 	"	.previous					\n"	\
758 	: "=r" (__pu_err)						\
759 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
760 	  "i" (-EFAULT));						\
761 }
762 
763 #define __put_user_unaligned_asm_ll32(ptr)				\
764 {									\
765 	__asm__ __volatile__(						\
766 	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
767 	"2:	sw	%D2, 4(%3)				\n"	\
768 	"3:							\n"	\
769 	"	.insn						\n"	\
770 	"	.section	.fixup,\"ax\"			\n"	\
771 	"4:	li	%0, %4					\n"	\
772 	"	j	3b					\n"	\
773 	"	.previous					\n"	\
774 	"	.section	__ex_table,\"a\"		\n"	\
775 	"	" __UA_ADDR "	1b, 4b				\n"	\
776 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
777 	"	" __UA_ADDR "	2b, 4b				\n"	\
778 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
779 	"	.previous"						\
780 	: "=r" (__pu_err)						\
781 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
782 	  "i" (-EFAULT));						\
783 }
784 
785 extern void __put_user_unaligned_unknown(void);
786 #endif
787 
788 /*
789  * We're generating jumps to subroutines which may be outside the range of
790  * the jal instruction, so modules load the target address and use jalr.
791  */
792 #ifdef MODULE
793 #define __MODULE_JAL(destination)					\
794 	".set\tnoat\n\t"						\
795 	__UA_LA "\t$1, " #destination "\n\t"				\
796 	"jalr\t$1\n\t"							\
797 	".set\tat\n\t"
798 #else
799 #define __MODULE_JAL(destination)					\
800 	"jal\t" #destination "\n\t"
801 #endif
802 
803 #if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
804 					      defined(CONFIG_CPU_HAS_PREFETCH))
805 #define DADDI_SCRATCH "$3"
806 #else
807 #define DADDI_SCRATCH "$0"
808 #endif
809 
810 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
811 
812 #ifndef CONFIG_EVA
813 #define __invoke_copy_to_user(to, from, n)				\
814 ({									\
815 	register void __user *__cu_to_r __asm__("$4");			\
816 	register const void *__cu_from_r __asm__("$5");			\
817 	register long __cu_len_r __asm__("$6");				\
818 									\
819 	__cu_to_r = (to);						\
820 	__cu_from_r = (from);						\
821 	__cu_len_r = (n);						\
822 	__asm__ __volatile__(						\
823 	__MODULE_JAL(__copy_user)					\
824 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
825 	:								\
826 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
827 	  DADDI_SCRATCH, "memory");					\
828 	__cu_len_r;							\
829 })
830 
831 #define __invoke_copy_to_kernel(to, from, n)				\
832 	__invoke_copy_to_user(to, from, n)
833 
834 #endif
835 
836 /*
837  * __copy_to_user: - Copy a block of data into user space, with less checking.
838  * @to:	  Destination address, in user space.
839  * @from: Source address, in kernel space.
840  * @n:	  Number of bytes to copy.
841  *
842  * Context: User context only. This function may sleep if pagefaults are
843  *          enabled.
844  *
845  * Copy data from kernel space to user space.  Caller must check
846  * the specified block with access_ok() before calling this function.
847  *
848  * Returns number of bytes that could not be copied.
849  * On success, this will be zero.
850  */
851 #define __copy_to_user(to, from, n)					\
852 ({									\
853 	void __user *__cu_to;						\
854 	const void *__cu_from;						\
855 	long __cu_len;							\
856 									\
857 	__cu_to = (to);							\
858 	__cu_from = (from);						\
859 	__cu_len = (n);							\
860 	might_fault();							\
861 	if (eva_kernel_access())					\
862 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
863 						   __cu_len);		\
864 	else								\
865 		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
866 						 __cu_len);		\
867 	__cu_len;							\
868 })
869 
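/*
 * Illustrative sketch: __copy_to_user() after an explicit access_ok()
 * check, copying out a kernel structure.  "ubuf" and "info" are
 * hypothetical.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(info)))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, &info, sizeof(info)))
 *		return -EFAULT;
 */
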
870 extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
871 
872 #define __copy_to_user_inatomic(to, from, n)				\
873 ({									\
874 	void __user *__cu_to;						\
875 	const void *__cu_from;						\
876 	long __cu_len;							\
877 									\
878 	__cu_to = (to);							\
879 	__cu_from = (from);						\
880 	__cu_len = (n);							\
881 	if (eva_kernel_access())					\
882 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
883 						   __cu_len);		\
884 	else								\
885 		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
886 						 __cu_len);		\
887 	__cu_len;							\
888 })
889 
890 #define __copy_from_user_inatomic(to, from, n)				\
891 ({									\
892 	void *__cu_to;							\
893 	const void __user *__cu_from;					\
894 	long __cu_len;							\
895 									\
896 	__cu_to = (to);							\
897 	__cu_from = (from);						\
898 	__cu_len = (n);							\
899 	if (eva_kernel_access())					\
900 		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
901 							      __cu_from,\
902 							      __cu_len);\
903 	else								\
904 		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
905 							    __cu_from,	\
906 							    __cu_len);	\
907 	__cu_len;							\
908 })
909 
910 /*
911  * copy_to_user: - Copy a block of data into user space.
912  * @to:	  Destination address, in user space.
913  * @from: Source address, in kernel space.
914  * @n:	  Number of bytes to copy.
915  *
916  * Context: User context only. This function may sleep if pagefaults are
917  *          enabled.
918  *
919  * Copy data from kernel space to user space.
920  *
921  * Returns number of bytes that could not be copied.
922  * On success, this will be zero.
923  */
924 #define copy_to_user(to, from, n)					\
925 ({									\
926 	void __user *__cu_to;						\
927 	const void *__cu_from;						\
928 	long __cu_len;							\
929 									\
930 	__cu_to = (to);							\
931 	__cu_from = (from);						\
932 	__cu_len = (n);							\
933 	if (eva_kernel_access()) {					\
934 		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
935 						   __cu_from,		\
936 						   __cu_len);		\
937 	} else {							\
938 		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
939 			might_fault();                                  \
940 			__cu_len = __invoke_copy_to_user(__cu_to,	\
941 							 __cu_from,	\
942 							 __cu_len);     \
943 		}							\
944 	}								\
945 	__cu_len;							\
946 })
947 
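/*
 * Illustrative sketch: copy_to_user() performs its own access_ok() check
 * and returns the number of bytes left uncopied.  "ubuf" and "st" are
 * hypothetical.
 *
 *	if (copy_to_user(ubuf, &st, sizeof(st)))
 *		return -EFAULT;
 *	return 0;
 */
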
948 #ifndef CONFIG_EVA
949 
950 #define __invoke_copy_from_user(to, from, n)				\
951 ({									\
952 	register void *__cu_to_r __asm__("$4");				\
953 	register const void __user *__cu_from_r __asm__("$5");		\
954 	register long __cu_len_r __asm__("$6");				\
955 									\
956 	__cu_to_r = (to);						\
957 	__cu_from_r = (from);						\
958 	__cu_len_r = (n);						\
959 	__asm__ __volatile__(						\
960 	".set\tnoreorder\n\t"						\
961 	__MODULE_JAL(__copy_user)					\
962 	".set\tnoat\n\t"						\
963 	__UA_ADDU "\t$1, %1, %2\n\t"					\
964 	".set\tat\n\t"							\
965 	".set\treorder"							\
966 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
967 	:								\
968 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
969 	  DADDI_SCRATCH, "memory");					\
970 	__cu_len_r;							\
971 })
972 
973 #define __invoke_copy_from_kernel(to, from, n)				\
974 	__invoke_copy_from_user(to, from, n)
975 
976 /* For userland <-> userland operations */
977 #define ___invoke_copy_in_user(to, from, n)				\
978 	__invoke_copy_from_user(to, from, n)
979 
980 /* For kernel <-> kernel operations */
981 #define ___invoke_copy_in_kernel(to, from, n)				\
982 	__invoke_copy_from_user(to, from, n)
983 
984 #define __invoke_copy_from_user_inatomic(to, from, n)			\
985 ({									\
986 	register void *__cu_to_r __asm__("$4");				\
987 	register const void __user *__cu_from_r __asm__("$5");		\
988 	register long __cu_len_r __asm__("$6");				\
989 									\
990 	__cu_to_r = (to);						\
991 	__cu_from_r = (from);						\
992 	__cu_len_r = (n);						\
993 	__asm__ __volatile__(						\
994 	".set\tnoreorder\n\t"						\
995 	__MODULE_JAL(__copy_user_inatomic)				\
996 	".set\tnoat\n\t"						\
997 	__UA_ADDU "\t$1, %1, %2\n\t"					\
998 	".set\tat\n\t"							\
999 	".set\treorder"							\
1000 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
1001 	:								\
1002 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
1003 	  DADDI_SCRATCH, "memory");					\
1004 	__cu_len_r;							\
1005 })
1006 
1007 #define __invoke_copy_from_kernel_inatomic(to, from, n)			\
1008 	__invoke_copy_from_user_inatomic(to, from, n)
1009 
1010 #else
1011 
1012 /* EVA specific functions */
1013 
1014 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
1015 				       size_t __n);
1016 extern size_t __copy_from_user_eva(void *__to, const void *__from,
1017 				   size_t __n);
1018 extern size_t __copy_to_user_eva(void *__to, const void *__from,
1019 				 size_t __n);
1020 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
1021 
1022 #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
1023 ({									\
1024 	register void *__cu_to_r __asm__("$4");				\
1025 	register const void __user *__cu_from_r __asm__("$5");		\
1026 	register long __cu_len_r __asm__("$6");				\
1027 									\
1028 	__cu_to_r = (to);						\
1029 	__cu_from_r = (from);						\
1030 	__cu_len_r = (n);						\
1031 	__asm__ __volatile__(						\
1032 	".set\tnoreorder\n\t"						\
1033 	__MODULE_JAL(func_ptr)						\
1034 	".set\tnoat\n\t"						\
1035 	__UA_ADDU "\t$1, %1, %2\n\t"					\
1036 	".set\tat\n\t"							\
1037 	".set\treorder"							\
1038 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
1039 	:								\
1040 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
1041 	  DADDI_SCRATCH, "memory");					\
1042 	__cu_len_r;							\
1043 })
1044 
1045 #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
1046 ({									\
1047 	register void *__cu_to_r __asm__("$4");				\
1048 	register const void __user *__cu_from_r __asm__("$5");		\
1049 	register long __cu_len_r __asm__("$6");				\
1050 									\
1051 	__cu_to_r = (to);						\
1052 	__cu_from_r = (from);						\
1053 	__cu_len_r = (n);						\
1054 	__asm__ __volatile__(						\
1055 	__MODULE_JAL(func_ptr)						\
1056 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
1057 	:								\
1058 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
1059 	  DADDI_SCRATCH, "memory");					\
1060 	__cu_len_r;							\
1061 })
1062 
1063 /*
1064  * Source or destination address is in userland. We need to go through
1065  * the TLB
1066  */
1067 #define __invoke_copy_from_user(to, from, n)				\
1068 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
1069 
1070 #define __invoke_copy_from_user_inatomic(to, from, n)			\
1071 	__invoke_copy_from_user_eva_generic(to, from, n,		\
1072 					    __copy_user_inatomic_eva)
1073 
1074 #define __invoke_copy_to_user(to, from, n)				\
1075 	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
1076 
1077 #define ___invoke_copy_in_user(to, from, n)				\
1078 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
1079 
1080 /*
1081  * Source or destination address in the kernel. We are not going through
1082  * the TLB
1083  */
1084 #define __invoke_copy_from_kernel(to, from, n)				\
1085 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1086 
1087 #define __invoke_copy_from_kernel_inatomic(to, from, n)			\
1088 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
1089 
1090 #define __invoke_copy_to_kernel(to, from, n)				\
1091 	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
1092 
1093 #define ___invoke_copy_in_kernel(to, from, n)				\
1094 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1095 
1096 #endif /* CONFIG_EVA */
1097 
1098 /*
1099  * __copy_from_user: - Copy a block of data from user space, with less checking.
1100  * @to:	  Destination address, in kernel space.
1101  * @from: Source address, in user space.
1102  * @n:	  Number of bytes to copy.
1103  *
1104  * Context: User context only. This function may sleep if pagefaults are
1105  *          enabled.
1106  *
1107  * Copy data from user space to kernel space.  Caller must check
1108  * the specified block with access_ok() before calling this function.
1109  *
1110  * Returns number of bytes that could not be copied.
1111  * On success, this will be zero.
1112  *
1113  * If some data could not be copied, this function will pad the copied
1114  * data to the requested size using zero bytes.
1115  */
1116 #define __copy_from_user(to, from, n)					\
1117 ({									\
1118 	void *__cu_to;							\
1119 	const void __user *__cu_from;					\
1120 	long __cu_len;							\
1121 									\
1122 	__cu_to = (to);							\
1123 	__cu_from = (from);						\
1124 	__cu_len = (n);							\
1125 	might_fault();							\
1126 	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
1127 					   __cu_len);			\
1128 	__cu_len;							\
1129 })
1130 
1131 /*
1132  * copy_from_user: - Copy a block of data from user space.
1133  * @to:	  Destination address, in kernel space.
1134  * @from: Source address, in user space.
1135  * @n:	  Number of bytes to copy.
1136  *
1137  * Context: User context only. This function may sleep if pagefaults are
1138  *          enabled.
1139  *
1140  * Copy data from user space to kernel space.
1141  *
1142  * Returns number of bytes that could not be copied.
1143  * On success, this will be zero.
1144  *
1145  * If some data could not be copied, this function will pad the copied
1146  * data to the requested size using zero bytes.
1147  */
1148 #define copy_from_user(to, from, n)					\
1149 ({									\
1150 	void *__cu_to;							\
1151 	const void __user *__cu_from;					\
1152 	long __cu_len;							\
1153 									\
1154 	__cu_to = (to);							\
1155 	__cu_from = (from);						\
1156 	__cu_len = (n);							\
1157 	if (eva_kernel_access()) {					\
1158 		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
1159 						     __cu_from,		\
1160 						     __cu_len);		\
1161 	} else {							\
1162 		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
1163 			might_fault();                                  \
1164 			__cu_len = __invoke_copy_from_user(__cu_to,	\
1165 							   __cu_from,	\
1166 							   __cu_len);   \
1167 		}							\
1168 	}								\
1169 	__cu_len;							\
1170 })
1171 
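/*
 * Illustrative sketch: pulling a request structure in from userspace.
 * "req" and "ubuf" are hypothetical; on a partial copy the destination is
 * zero-padded as described above.
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 */
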
1172 #define __copy_in_user(to, from, n)					\
1173 ({									\
1174 	void __user *__cu_to;						\
1175 	const void __user *__cu_from;					\
1176 	long __cu_len;							\
1177 									\
1178 	__cu_to = (to);							\
1179 	__cu_from = (from);						\
1180 	__cu_len = (n);							\
1181 	if (eva_kernel_access()) {					\
1182 		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
1183 						    __cu_len);		\
1184 	} else {							\
1185 		might_fault();						\
1186 		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
1187 						  __cu_len);		\
1188 	}								\
1189 	__cu_len;							\
1190 })
1191 
1192 #define copy_in_user(to, from, n)					\
1193 ({									\
1194 	void __user *__cu_to;						\
1195 	const void __user *__cu_from;					\
1196 	long __cu_len;							\
1197 									\
1198 	__cu_to = (to);							\
1199 	__cu_from = (from);						\
1200 	__cu_len = (n);							\
1201 	if (eva_kernel_access()) {					\
1202 		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,	\
1203 						    __cu_len);		\
1204 	} else {							\
1205 		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
1206 			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
1207 			might_fault();					\
1208 			__cu_len = ___invoke_copy_in_user(__cu_to,	\
1209 							  __cu_from,	\
1210 							  __cu_len);	\
1211 		}							\
1212 	}								\
1213 	__cu_len;							\
1214 })
1215 
1216 /*
1217  * __clear_user: - Zero a block of memory in user space, with less checking.
1218  * @to:	  Destination address, in user space.
1219  * @n:	  Number of bytes to zero.
1220  *
1221  * Zero a block of memory in user space.  Caller must check
1222  * the specified block with access_ok() before calling this function.
1223  *
1224  * Returns number of bytes that could not be cleared.
1225  * On success, this will be zero.
1226  */
1227 static inline __kernel_size_t
1228 __clear_user(void __user *addr, __kernel_size_t size)
1229 {
1230 	__kernel_size_t res;
1231 
1232 	might_fault();
1233 	__asm__ __volatile__(
1234 		"move\t$4, %1\n\t"
1235 		"move\t$5, $0\n\t"
1236 		"move\t$6, %2\n\t"
1237 		__MODULE_JAL(__bzero)
1238 		"move\t%0, $6"
1239 		: "=r" (res)
1240 		: "r" (addr), "r" (size)
1241 		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1242 
1243 	return res;
1244 }
1245 
1246 #define clear_user(addr,n)						\
1247 ({									\
1248 	void __user * __cl_addr = (addr);				\
1249 	unsigned long __cl_size = (n);					\
1250 	if (__cl_size && access_ok(VERIFY_WRITE,			\
1251 					__cl_addr, __cl_size))		\
1252 		__cl_size = __clear_user(__cl_addr, __cl_size);		\
1253 	__cl_size;							\
1254 })
1255 
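/*
 * Illustrative sketch: zeroing the tail of a user buffer that was only
 * partially filled, a common pattern in read()-style handlers.  "ubuf"
 * (a "char __user *"), "len" and "copied" are hypothetical.
 *
 *	if (copied < len && clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */
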
1256 /*
1257  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
1258  * @dst:   Destination address, in kernel space.  This buffer must be at
1259  *	   least @count bytes long.
1260  * @src:   Source address, in user space.
1261  * @count: Maximum number of bytes to copy, including the trailing NUL.
1262  *
1263  * Copies a NUL-terminated string from userspace to kernel space.
1264  * Caller must check the specified block with access_ok() before calling
1265  * this function.
1266  *
1267  * On success, returns the length of the string (not including the trailing
1268  * NUL).
1269  *
1270  * If access to userspace fails, returns -EFAULT (some data may have been
1271  * copied).
1272  *
1273  * If @count is smaller than the length of the string, copies @count bytes
1274  * and returns @count.
1275  */
1276 static inline long
1277 __strncpy_from_user(char *__to, const char __user *__from, long __len)
1278 {
1279 	long res;
1280 
1281 	if (eva_kernel_access()) {
1282 		__asm__ __volatile__(
1283 			"move\t$4, %1\n\t"
1284 			"move\t$5, %2\n\t"
1285 			"move\t$6, %3\n\t"
1286 			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1287 			"move\t%0, $2"
1288 			: "=r" (res)
1289 			: "r" (__to), "r" (__from), "r" (__len)
1290 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1291 	} else {
1292 		might_fault();
1293 		__asm__ __volatile__(
1294 			"move\t$4, %1\n\t"
1295 			"move\t$5, %2\n\t"
1296 			"move\t$6, %3\n\t"
1297 			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
1298 			"move\t%0, $2"
1299 			: "=r" (res)
1300 			: "r" (__to), "r" (__from), "r" (__len)
1301 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1302 	}
1303 
1304 	return res;
1305 }
1306 
1307 /*
1308  * strncpy_from_user: - Copy a NUL terminated string from userspace.
1309  * @dst:   Destination address, in kernel space.  This buffer must be at
1310  *	   least @count bytes long.
1311  * @src:   Source address, in user space.
1312  * @count: Maximum number of bytes to copy, including the trailing NUL.
1313  *
1314  * Copies a NUL-terminated string from userspace to kernel space.
1315  *
1316  * On success, returns the length of the string (not including the trailing
1317  * NUL).
1318  *
1319  * If access to userspace fails, returns -EFAULT (some data may have been
1320  * copied).
1321  *
1322  * If @count is smaller than the length of the string, copies @count bytes
1323  * and returns @count.
1324  */
1325 static inline long
1326 strncpy_from_user(char *__to, const char __user *__from, long __len)
1327 {
1328 	long res;
1329 
1330 	if (eva_kernel_access()) {
1331 		__asm__ __volatile__(
1332 			"move\t$4, %1\n\t"
1333 			"move\t$5, %2\n\t"
1334 			"move\t$6, %3\n\t"
1335 			__MODULE_JAL(__strncpy_from_kernel_asm)
1336 			"move\t%0, $2"
1337 			: "=r" (res)
1338 			: "r" (__to), "r" (__from), "r" (__len)
1339 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1340 	} else {
1341 		might_fault();
1342 		__asm__ __volatile__(
1343 			"move\t$4, %1\n\t"
1344 			"move\t$5, %2\n\t"
1345 			"move\t$6, %3\n\t"
1346 			__MODULE_JAL(__strncpy_from_user_asm)
1347 			"move\t%0, $2"
1348 			: "=r" (res)
1349 			: "r" (__to), "r" (__from), "r" (__len)
1350 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1351 	}
1352 
1353 	return res;
1354 }
1355 
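/*
 * Illustrative sketch: copying a user-supplied string into a fixed kernel
 * buffer.  A negative return is -EFAULT; a return equal to the buffer size
 * means no terminating NUL fitted.  "uname" is hypothetical.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */
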
1356 /*
1357  * strlen_user: - Get the size of a string in user space.
1358  * @str: The string to measure.
1359  *
1360  * Context: User context only. This function may sleep if pagefaults are
1361  *          enabled.
1362  *
1363  * Get the size of a NUL-terminated string in user space.
1364  *
1365  * Returns the size of the string INCLUDING the terminating NUL.
1366  * On exception, returns 0.
1367  *
1368  * If there is a limit on the length of a valid string, you may wish to
1369  * consider using strnlen_user() instead.
1370  */
1371 static inline long strlen_user(const char __user *s)
1372 {
1373 	long res;
1374 
1375 	if (eva_kernel_access()) {
1376 		__asm__ __volatile__(
1377 			"move\t$4, %1\n\t"
1378 			__MODULE_JAL(__strlen_kernel_asm)
1379 			"move\t%0, $2"
1380 			: "=r" (res)
1381 			: "r" (s)
1382 			: "$2", "$4", __UA_t0, "$31");
1383 	} else {
1384 		might_fault();
1385 		__asm__ __volatile__(
1386 			"move\t$4, %1\n\t"
1387 			__MODULE_JAL(__strlen_user_asm)
1388 			"move\t%0, $2"
1389 			: "=r" (res)
1390 			: "r" (s)
1391 			: "$2", "$4", __UA_t0, "$31");
1392 	}
1393 
1394 	return res;
1395 }
1396 
1397 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
1398 static inline long __strnlen_user(const char __user *s, long n)
1399 {
1400 	long res;
1401 
1402 	if (eva_kernel_access()) {
1403 		__asm__ __volatile__(
1404 			"move\t$4, %1\n\t"
1405 			"move\t$5, %2\n\t"
1406 			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
1407 			"move\t%0, $2"
1408 			: "=r" (res)
1409 			: "r" (s), "r" (n)
1410 			: "$2", "$4", "$5", __UA_t0, "$31");
1411 	} else {
1412 		might_fault();
1413 		__asm__ __volatile__(
1414 			"move\t$4, %1\n\t"
1415 			"move\t$5, %2\n\t"
1416 			__MODULE_JAL(__strnlen_user_nocheck_asm)
1417 			"move\t%0, $2"
1418 			: "=r" (res)
1419 			: "r" (s), "r" (n)
1420 			: "$2", "$4", "$5", __UA_t0, "$31");
1421 	}
1422 
1423 	return res;
1424 }
1425 
1426 /*
1427  * strnlen_user: - Get the size of a string in user space.
1428  * @str: The string to measure.
1429  *
1430  * Context: User context only. This function may sleep if pagefaults are
1431  *          enabled.
1432  *
1433  * Get the size of a NUL-terminated string in user space.
1434  *
1435  * Returns the size of the string INCLUDING the terminating NUL.
1436  * On exception, returns 0.
1437  * If the string is too long, returns a value greater than @n.
1438  */
1439 static inline long strnlen_user(const char __user *s, long n)
1440 {
1441 	long res;
1442 
1443 	might_fault();
1444 	if (eva_kernel_access()) {
1445 		__asm__ __volatile__(
1446 			"move\t$4, %1\n\t"
1447 			"move\t$5, %2\n\t"
1448 			__MODULE_JAL(__strnlen_kernel_asm)
1449 			"move\t%0, $2"
1450 			: "=r" (res)
1451 			: "r" (s), "r" (n)
1452 			: "$2", "$4", "$5", __UA_t0, "$31");
1453 	} else {
1454 		__asm__ __volatile__(
1455 			"move\t$4, %1\n\t"
1456 			"move\t$5, %2\n\t"
1457 			__MODULE_JAL(__strnlen_user_asm)
1458 			"move\t%0, $2"
1459 			: "=r" (res)
1460 			: "r" (s), "r" (n)
1461 			: "$2", "$4", "$5", __UA_t0, "$31");
1462 	}
1463 
1464 	return res;
1465 }
1466 
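/*
 * Illustrative sketch: bounding a user string before allocating space for
 * it.  "ustr" is hypothetical; the returned length includes the
 * terminating NUL, as documented above.
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (!len)
 *		return -EFAULT;
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;
 */
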
1467 struct exception_table_entry
1468 {
1469 	unsigned long insn;
1470 	unsigned long nextinsn;
1471 };
1472 
1473 extern int fixup_exception(struct pt_regs *regs);
1474 
1475 #endif /* _ASM_UACCESS_H */
1476