xref: /openbmc/linux/arch/mips/include/asm/uaccess.h (revision 4e1a33b1)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13 
14 #include <linux/kernel.h>
15 #include <linux/errno.h>
16 #include <linux/thread_info.h>
17 #include <linux/string.h>
18 #include <asm/asm-eva.h>
19 #include <asm/extable.h>
20 
21 /*
22  * The fs value determines whether argument validity checking should be
23  * performed or not.  If get_fs() == USER_DS, checking is performed; with
24  * get_fs() == KERNEL_DS, checking is bypassed.
25  *
26  * For historical reasons, these macros are grossly misnamed.
27  */
28 #ifdef CONFIG_32BIT
29 
30 #ifdef CONFIG_KVM_GUEST
31 #define __UA_LIMIT 0x40000000UL
32 #else
33 #define __UA_LIMIT 0x80000000UL
34 #endif
35 
36 #define __UA_ADDR	".word"
37 #define __UA_LA		"la"
38 #define __UA_ADDU	"addu"
39 #define __UA_t0		"$8"
40 #define __UA_t1		"$9"
41 
42 #endif /* CONFIG_32BIT */
43 
44 #ifdef CONFIG_64BIT
45 
46 extern u64 __ua_limit;
47 
48 #define __UA_LIMIT	__ua_limit
49 
50 #define __UA_ADDR	".dword"
51 #define __UA_LA		"dla"
52 #define __UA_ADDU	"daddu"
53 #define __UA_t0		"$12"
54 #define __UA_t1		"$13"
55 
56 #endif /* CONFIG_64BIT */
57 
58 /*
59  * USER_DS is a bitmask that has the bits set that may not be set in a valid
60  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
61  * the arithmetic we're doing only works if the limit is a power of two, so
62  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
63  * address in this range it's the process's problem, not ours :-)
64  */
65 
66 #ifdef CONFIG_KVM_GUEST
67 #define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
68 #define USER_DS		((mm_segment_t) { 0xC0000000UL })
69 #else
70 #define KERNEL_DS	((mm_segment_t) { 0UL })
71 #define USER_DS		((mm_segment_t) { __UA_LIMIT })
72 #endif
73 
74 #define VERIFY_READ    0
75 #define VERIFY_WRITE   1
76 
77 #define get_ds()	(KERNEL_DS)
78 #define get_fs()	(current_thread_info()->addr_limit)
79 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
80 
81 #define segment_eq(a, b)	((a).seg == (b).seg)
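/*
 * Illustrative sketch of the usual save/override/restore pattern for the
 * address limit; "old_fs" is a placeholder name:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... perform uaccess calls on a kernel buffer ...
 *	set_fs(old_fs);
 */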
82 
83 /*
84  * eva_kernel_access() - determine whether accesses should target kernel memory on an EVA system
85  *
86  * Determines whether memory accesses should be performed to kernel memory
87  * on a system using Extended Virtual Addressing (EVA).
88  *
89  * Return: true if a kernel memory access on an EVA system, else false.
90  */
91 static inline bool eva_kernel_access(void)
92 {
93 	if (!IS_ENABLED(CONFIG_EVA))
94 		return false;
95 
96 	return segment_eq(get_fs(), get_ds());
97 }
98 
99 /*
100  * Is an address valid? This does a straightforward calculation rather
101  * than a series of tests.
102  *
103  * Address valid if:
104  *  - "addr" doesn't have any high-bits set
105  *  - AND "size" doesn't have any high-bits set
106  *  - AND "addr+size" doesn't have any high-bits set
107  *  - OR we are in kernel mode.
108  *
109  * __ua_size() is a trick to avoid runtime checking of positive constant
110  * sizes; for those we already know at compile time that the size is ok.
111  */
112 #define __ua_size(size)							\
113 	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
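/*
 * Worked example of the check above, assuming a plain 32-bit kernel where
 * the mask is 0x80000000: for addr = 0x7fff0000 and size = 0x1000,
 *
 *	0x80000000 & (0x7fff0000 | 0x7fff1000 | 0x1000) == 0
 *
 * so the range is accepted, whereas addr = 0x7fffffff with size = 0x10
 * yields addr + size = 0x8000000f, whose high bit makes the result
 * nonzero and the range is rejected.
 */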
114 
115 /*
116  * access_ok: - Checks if a user space pointer is valid
117  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
118  *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
119  *	  to write to a block, it is always safe to read from it.
120  * @addr: User space pointer to start of block to check
121  * @size: Size of block to check
122  *
123  * Context: User context only. This function may sleep if pagefaults are
124  *          enabled.
125  *
126  * Checks if a pointer to a block of memory in user space is valid.
127  *
128  * Returns true (nonzero) if the memory block may be valid, false (zero)
129  * if it is definitely invalid.
130  *
131  * Note that, depending on architecture, this function probably just
132  * checks that the pointer is in the user space range - after calling
133  * this function, memory access functions may still return -EFAULT.
134  */
135 
136 #define __access_mask get_fs().seg
137 
138 #define __access_ok(addr, size, mask)					\
139 ({									\
140 	unsigned long __addr = (unsigned long) (addr);			\
141 	unsigned long __size = size;					\
142 	unsigned long __mask = mask;					\
143 	unsigned long __ok;						\
144 									\
145 	__chk_user_ptr(addr);						\
146 	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
147 		__ua_size(__size)));					\
148 	__ok == 0;							\
149 })
150 
151 #define access_ok(type, addr, size)					\
152 	likely(__access_ok((addr), (size), __access_mask))
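/*
 * Illustrative usage sketch; "ubuf" and "len" are placeholder names:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 */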
153 
154 /*
155  * put_user: - Write a simple value into user space.
156  * @x:	 Value to copy to user space.
157  * @ptr: Destination address, in user space.
158  *
159  * Context: User context only. This function may sleep if pagefaults are
160  *          enabled.
161  *
162  * This macro copies a single simple value from kernel space to user
163  * space.  It supports simple types like char and int, but not larger
164  * data types like structures or arrays.
165  *
166  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
167  * to the result of dereferencing @ptr.
168  *
169  * Returns zero on success, or -EFAULT on error.
170  */
171 #define put_user(x,ptr) \
172 	__put_user_check((x), (ptr), sizeof(*(ptr)))
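/*
 * Illustrative usage sketch; "status" and "uptr" are placeholder names:
 *
 *	int status = 0;
 *
 *	if (put_user(status, uptr))
 *		return -EFAULT;
 */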
173 
174 /*
175  * get_user: - Get a simple variable from user space.
176  * @x:	 Variable to store result.
177  * @ptr: Source address, in user space.
178  *
179  * Context: User context only. This function may sleep if pagefaults are
180  *          enabled.
181  *
182  * This macro copies a single simple variable from user space to kernel
183  * space.  It supports simple types like char and int, but not larger
184  * data types like structures or arrays.
185  *
186  * @ptr must have pointer-to-simple-variable type, and the result of
187  * dereferencing @ptr must be assignable to @x without a cast.
188  *
189  * Returns zero on success, or -EFAULT on error.
190  * On error, the variable @x is set to zero.
191  */
192 #define get_user(x,ptr) \
193 	__get_user_check((x), (ptr), sizeof(*(ptr)))
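/*
 * Illustrative usage sketch; "val" and "uptr" are placeholder names:
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */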
194 
195 /*
196  * __put_user: - Write a simple value into user space, with less checking.
197  * @x:	 Value to copy to user space.
198  * @ptr: Destination address, in user space.
199  *
200  * Context: User context only. This function may sleep if pagefaults are
201  *          enabled.
202  *
203  * This macro copies a single simple value from kernel space to user
204  * space.  It supports simple types like char and int, but not larger
205  * data types like structures or arrays.
206  *
207  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
208  * to the result of dereferencing @ptr.
209  *
210  * Caller must check the pointer with access_ok() before calling this
211  * function.
212  *
213  * Returns zero on success, or -EFAULT on error.
214  */
215 #define __put_user(x,ptr) \
216 	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
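/*
 * Illustrative sketch; the caller performs the access_ok() check itself
 * ("uptr", "flags" and "err" are placeholder names):
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	err = __put_user(flags, uptr);
 */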
217 
218 /*
219  * __get_user: - Get a simple variable from user space, with less checking.
220  * @x:	 Variable to store result.
221  * @ptr: Source address, in user space.
222  *
223  * Context: User context only. This function may sleep if pagefaults are
224  *          enabled.
225  *
226  * This macro copies a single simple variable from user space to kernel
227  * space.  It supports simple types like char and int, but not larger
228  * data types like structures or arrays.
229  *
230  * @ptr must have pointer-to-simple-variable type, and the result of
231  * dereferencing @ptr must be assignable to @x without a cast.
232  *
233  * Caller must check the pointer with access_ok() before calling this
234  * function.
235  *
236  * Returns zero on success, or -EFAULT on error.
237  * On error, the variable @x is set to zero.
238  */
239 #define __get_user(x,ptr) \
240 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
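/*
 * Illustrative sketch; as with __put_user(), the caller performs the
 * access_ok() check itself ("uptr", "flags" and "err" are placeholders):
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	err = __get_user(flags, uptr);
 */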
241 
242 struct __large_struct { unsigned long buf[100]; };
243 #define __m(x) (*(struct __large_struct __user *)(x))
244 
245 /*
246  * Yuck.  We need two variants, one for 64bit operation and one
247  * for 32 bit mode and old iron.
248  */
249 #ifndef CONFIG_EVA
250 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
251 #else
252 /*
253  * Kernel-specific functions for EVA. We need to use normal load instructions
254  * to read data from kernel memory when operating in EVA mode. We use these
255  * macros to avoid redefining __get_data_asm for EVA.
256  */
257 #undef _loadd
258 #undef _loadw
259 #undef _loadh
260 #undef _loadb
261 #ifdef CONFIG_32BIT
262 #define _loadd			_loadw
263 #else
264 #define _loadd(reg, addr)	"ld " reg ", " addr
265 #endif
266 #define _loadw(reg, addr)	"lw " reg ", " addr
267 #define _loadh(reg, addr)	"lh " reg ", " addr
268 #define _loadb(reg, addr)	"lb " reg ", " addr
269 
270 #define __get_kernel_common(val, size, ptr)				\
271 do {									\
272 	switch (size) {							\
273 	case 1: __get_data_asm(val, _loadb, ptr); break;		\
274 	case 2: __get_data_asm(val, _loadh, ptr); break;		\
275 	case 4: __get_data_asm(val, _loadw, ptr); break;		\
276 	case 8: __GET_DW(val, _loadd, ptr); break;			\
277 	default: __get_user_unknown(); break;				\
278 	}								\
279 } while (0)
280 #endif
281 
282 #ifdef CONFIG_32BIT
283 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
284 #endif
285 #ifdef CONFIG_64BIT
286 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
287 #endif
288 
289 extern void __get_user_unknown(void);
290 
291 #define __get_user_common(val, size, ptr)				\
292 do {									\
293 	switch (size) {							\
294 	case 1: __get_data_asm(val, user_lb, ptr); break;		\
295 	case 2: __get_data_asm(val, user_lh, ptr); break;		\
296 	case 4: __get_data_asm(val, user_lw, ptr); break;		\
297 	case 8: __GET_DW(val, user_ld, ptr); break;			\
298 	default: __get_user_unknown(); break;				\
299 	}								\
300 } while (0)
301 
302 #define __get_user_nocheck(x, ptr, size)				\
303 ({									\
304 	int __gu_err;							\
305 									\
306 	if (eva_kernel_access()) {					\
307 		__get_kernel_common((x), size, ptr);			\
308 	} else {							\
309 		__chk_user_ptr(ptr);					\
310 		__get_user_common((x), size, ptr);			\
311 	}								\
312 	__gu_err;							\
313 })
314 
315 #define __get_user_check(x, ptr, size)					\
316 ({									\
317 	int __gu_err = -EFAULT;						\
318 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
319 									\
320 	might_fault();							\
321 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {		\
322 		if (eva_kernel_access())				\
323 			__get_kernel_common((x), size, __gu_ptr);	\
324 		else							\
325 			__get_user_common((x), size, __gu_ptr);		\
326 	} else								\
327 		(x) = 0;						\
328 									\
329 	__gu_err;							\
330 })
331 
332 #define __get_data_asm(val, insn, addr)					\
333 {									\
334 	long __gu_tmp;							\
335 									\
336 	__asm__ __volatile__(						\
337 	"1:	"insn("%1", "%3")"				\n"	\
338 	"2:							\n"	\
339 	"	.insn						\n"	\
340 	"	.section .fixup,\"ax\"				\n"	\
341 	"3:	li	%0, %4					\n"	\
342 	"	move	%1, $0					\n"	\
343 	"	j	2b					\n"	\
344 	"	.previous					\n"	\
345 	"	.section __ex_table,\"a\"			\n"	\
346 	"	"__UA_ADDR "\t1b, 3b				\n"	\
347 	"	.previous					\n"	\
348 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
349 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
350 									\
351 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
352 }
353 
354 /*
355  * Get a 64-bit long long using 32-bit registers.
356  */
357 #define __get_data_asm_ll32(val, insn, addr)				\
358 {									\
359 	union {								\
360 		unsigned long long	l;				\
361 		__typeof__(*(addr))	t;				\
362 	} __gu_tmp;							\
363 									\
364 	__asm__ __volatile__(						\
365 	"1:	" insn("%1", "(%3)")"				\n"	\
366 	"2:	" insn("%D1", "4(%3)")"				\n"	\
367 	"3:							\n"	\
368 	"	.insn						\n"	\
369 	"	.section	.fixup,\"ax\"			\n"	\
370 	"4:	li	%0, %4					\n"	\
371 	"	move	%1, $0					\n"	\
372 	"	move	%D1, $0					\n"	\
373 	"	j	3b					\n"	\
374 	"	.previous					\n"	\
375 	"	.section	__ex_table,\"a\"		\n"	\
376 	"	" __UA_ADDR "	1b, 4b				\n"	\
377 	"	" __UA_ADDR "	2b, 4b				\n"	\
378 	"	.previous					\n"	\
379 	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
380 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
381 									\
382 	(val) = __gu_tmp.t;						\
383 }
384 
385 #ifndef CONFIG_EVA
386 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
387 #else
388 /*
389  * Kernel-specific functions for EVA. We need to use normal store instructions
390  * to write data to kernel memory when operating in EVA mode. We use these
391  * macros to avoid redefining __put_data_asm for EVA.
392  */
393 #undef _stored
394 #undef _storew
395 #undef _storeh
396 #undef _storeb
397 #ifdef CONFIG_32BIT
398 #define _stored			_storew
399 #else
400 #define _stored(reg, addr)	"ld " reg ", " addr
401 #endif
402 
403 #define _storew(reg, addr)	"sw " reg ", " addr
404 #define _storeh(reg, addr)	"sh " reg ", " addr
405 #define _storeb(reg, addr)	"sb " reg ", " addr
406 
407 #define __put_kernel_common(ptr, size)					\
408 do {									\
409 	switch (size) {							\
410 	case 1: __put_data_asm(_storeb, ptr); break;			\
411 	case 2: __put_data_asm(_storeh, ptr); break;			\
412 	case 4: __put_data_asm(_storew, ptr); break;			\
413 	case 8: __PUT_DW(_stored, ptr); break;				\
414 	default: __put_user_unknown(); break;				\
415 	}								\
416 } while (0)
417 #endif
418 
419 /*
420  * Yuck.  We need two variants, one for 64bit operation and one
421  * for 32 bit mode and old iron.
422  */
423 #ifdef CONFIG_32BIT
424 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
425 #endif
426 #ifdef CONFIG_64BIT
427 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
428 #endif
429 
430 #define __put_user_common(ptr, size)					\
431 do {									\
432 	switch (size) {							\
433 	case 1: __put_data_asm(user_sb, ptr); break;			\
434 	case 2: __put_data_asm(user_sh, ptr); break;			\
435 	case 4: __put_data_asm(user_sw, ptr); break;			\
436 	case 8: __PUT_DW(user_sd, ptr); break;				\
437 	default: __put_user_unknown(); break;				\
438 	}								\
439 } while (0)
440 
441 #define __put_user_nocheck(x, ptr, size)				\
442 ({									\
443 	__typeof__(*(ptr)) __pu_val;					\
444 	int __pu_err = 0;						\
445 									\
446 	__pu_val = (x);							\
447 	if (eva_kernel_access()) {					\
448 		__put_kernel_common(ptr, size);				\
449 	} else {							\
450 		__chk_user_ptr(ptr);					\
451 		__put_user_common(ptr, size);				\
452 	}								\
453 	__pu_err;							\
454 })
455 
456 #define __put_user_check(x, ptr, size)					\
457 ({									\
458 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
459 	__typeof__(*(ptr)) __pu_val = (x);				\
460 	int __pu_err = -EFAULT;						\
461 									\
462 	might_fault();							\
463 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
464 		if (eva_kernel_access())				\
465 			__put_kernel_common(__pu_addr, size);		\
466 		else							\
467 			__put_user_common(__pu_addr, size);		\
468 	}								\
469 									\
470 	__pu_err;							\
471 })
472 
473 #define __put_data_asm(insn, ptr)					\
474 {									\
475 	__asm__ __volatile__(						\
476 	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
477 	"2:							\n"	\
478 	"	.insn						\n"	\
479 	"	.section	.fixup,\"ax\"			\n"	\
480 	"3:	li	%0, %4					\n"	\
481 	"	j	2b					\n"	\
482 	"	.previous					\n"	\
483 	"	.section	__ex_table,\"a\"		\n"	\
484 	"	" __UA_ADDR "	1b, 3b				\n"	\
485 	"	.previous					\n"	\
486 	: "=r" (__pu_err)						\
487 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
488 	  "i" (-EFAULT));						\
489 }
490 
491 #define __put_data_asm_ll32(insn, ptr)					\
492 {									\
493 	__asm__ __volatile__(						\
494 	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
495 	"2:	"insn("%D2", "4(%3)")"				\n"	\
496 	"3:							\n"	\
497 	"	.insn						\n"	\
498 	"	.section	.fixup,\"ax\"			\n"	\
499 	"4:	li	%0, %4					\n"	\
500 	"	j	3b					\n"	\
501 	"	.previous					\n"	\
502 	"	.section	__ex_table,\"a\"		\n"	\
503 	"	" __UA_ADDR "	1b, 4b				\n"	\
504 	"	" __UA_ADDR "	2b, 4b				\n"	\
505 	"	.previous"						\
506 	: "=r" (__pu_err)						\
507 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
508 	  "i" (-EFAULT));						\
509 }
510 
511 extern void __put_user_unknown(void);
512 
513 /*
514  * ul{b,h,w} are macros and there are no equivalent macros for EVA.
515  * EVA unaligned access is handled in the ADE exception handler.
516  */
517 #ifndef CONFIG_EVA
518 /*
519  * put_user_unaligned: - Write a simple value into user space.
520  * @x:	 Value to copy to user space.
521  * @ptr: Destination address, in user space.
522  *
523  * Context: User context only. This function may sleep if pagefaults are
524  *          enabled.
525  *
526  * This macro copies a single simple value from kernel space to user
527  * space.  It supports simple types like char and int, but not larger
528  * data types like structures or arrays.
529  *
530  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
531  * to the result of dereferencing @ptr.
532  *
533  * Returns zero on success, or -EFAULT on error.
534  */
535 #define put_user_unaligned(x,ptr)	\
536 	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
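/*
 * Illustrative usage sketch; "up" stands for a possibly misaligned user
 * pointer:
 *
 *	u32 val = 0;
 *
 *	if (put_user_unaligned(val, up))
 *		return -EFAULT;
 */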
537 
538 /*
539  * get_user_unaligned: - Get a simple variable from user space.
540  * @x:	 Variable to store result.
541  * @ptr: Source address, in user space.
542  *
543  * Context: User context only. This function may sleep if pagefaults are
544  *          enabled.
545  *
546  * This macro copies a single simple variable from user space to kernel
547  * space.  It supports simple types like char and int, but not larger
548  * data types like structures or arrays.
549  *
550  * @ptr must have pointer-to-simple-variable type, and the result of
551  * dereferencing @ptr must be assignable to @x without a cast.
552  *
553  * Returns zero on success, or -EFAULT on error.
554  * On error, the variable @x is set to zero.
555  */
556 #define get_user_unaligned(x,ptr) \
557 	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
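/*
 * Illustrative usage sketch; "up" stands for a possibly misaligned user
 * pointer:
 *
 *	u32 val;
 *
 *	if (get_user_unaligned(val, up))
 *		return -EFAULT;
 */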
558 
559 /*
560  * __put_user_unaligned: - Write a simple value into user space, with less checking.
561  * @x:	 Value to copy to user space.
562  * @ptr: Destination address, in user space.
563  *
564  * Context: User context only. This function may sleep if pagefaults are
565  *          enabled.
566  *
567  * This macro copies a single simple value from kernel space to user
568  * space.  It supports simple types like char and int, but not larger
569  * data types like structures or arrays.
570  *
571  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
572  * to the result of dereferencing @ptr.
573  *
574  * Caller must check the pointer with access_ok() before calling this
575  * function.
576  *
577  * Returns zero on success, or -EFAULT on error.
578  */
579 #define __put_user_unaligned(x,ptr) \
580 	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
581 
582 /*
583  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
584  * @x:	 Variable to store result.
585  * @ptr: Source address, in user space.
586  *
587  * Context: User context only. This function may sleep if pagefaults are
588  *          enabled.
589  *
590  * This macro copies a single simple variable from user space to kernel
591  * space.  It supports simple types like char and int, but not larger
592  * data types like structures or arrays.
593  *
594  * @ptr must have pointer-to-simple-variable type, and the result of
595  * dereferencing @ptr must be assignable to @x without a cast.
596  *
597  * Caller must check the pointer with access_ok() before calling this
598  * function.
599  *
600  * Returns zero on success, or -EFAULT on error.
601  * On error, the variable @x is set to zero.
602  */
603 #define __get_user_unaligned(x,ptr) \
604 	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
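/*
 * The __-prefixed unaligned variants follow the same pattern as
 * __put_user()/__get_user(); an illustrative sketch with placeholder
 * names "up", "val" and "err":
 *
 *	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
 *		return -EFAULT;
 *	err = __get_user_unaligned(val, up);
 */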
605 
606 /*
607  * Yuck.  We need two variants, one for 64bit operation and one
608  * for 32 bit mode and old iron.
609  */
610 #ifdef CONFIG_32BIT
611 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
612 	__get_user_unaligned_asm_ll32(val, ptr)
613 #endif
614 #ifdef CONFIG_64BIT
615 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
616 	__get_user_unaligned_asm(val, "uld", ptr)
617 #endif
618 
619 extern void __get_user_unaligned_unknown(void);
620 
621 #define __get_user_unaligned_common(val, size, ptr)			\
622 do {									\
623 	switch (size) {							\
624 	case 1: __get_data_asm(val, "lb", ptr); break;			\
625 	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
626 	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
627 	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
628 	default: __get_user_unaligned_unknown(); break;			\
629 	}								\
630 } while (0)
631 
632 #define __get_user_unaligned_nocheck(x,ptr,size)			\
633 ({									\
634 	int __gu_err;							\
635 									\
636 	__get_user_unaligned_common((x), size, ptr);			\
637 	__gu_err;							\
638 })
639 
640 #define __get_user_unaligned_check(x,ptr,size)				\
641 ({									\
642 	int __gu_err = -EFAULT;						\
643 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
644 									\
645 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
646 		__get_user_unaligned_common((x), size, __gu_ptr);	\
647 									\
648 	__gu_err;							\
649 })
650 
651 #define __get_data_unaligned_asm(val, insn, addr)			\
652 {									\
653 	long __gu_tmp;							\
654 									\
655 	__asm__ __volatile__(						\
656 	"1:	" insn "	%1, %3				\n"	\
657 	"2:							\n"	\
658 	"	.insn						\n"	\
659 	"	.section .fixup,\"ax\"				\n"	\
660 	"3:	li	%0, %4					\n"	\
661 	"	move	%1, $0					\n"	\
662 	"	j	2b					\n"	\
663 	"	.previous					\n"	\
664 	"	.section __ex_table,\"a\"			\n"	\
665 	"	"__UA_ADDR "\t1b, 3b				\n"	\
666 	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
667 	"	.previous					\n"	\
668 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
669 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
670 									\
671 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
672 }
673 
674 /*
675  * Get a 64-bit long long using 32-bit registers.
676  */
677 #define __get_user_unaligned_asm_ll32(val, addr)			\
678 {									\
679 	unsigned long long __gu_tmp;					\
680 									\
681 	__asm__ __volatile__(						\
682 	"1:	ulw	%1, (%3)				\n"	\
683 	"2:	ulw	%D1, 4(%3)				\n"	\
684 	"	move	%0, $0					\n"	\
685 	"3:							\n"	\
686 	"	.insn						\n"	\
687 	"	.section	.fixup,\"ax\"			\n"	\
688 	"4:	li	%0, %4					\n"	\
689 	"	move	%1, $0					\n"	\
690 	"	move	%D1, $0					\n"	\
691 	"	j	3b					\n"	\
692 	"	.previous					\n"	\
693 	"	.section	__ex_table,\"a\"		\n"	\
694 	"	" __UA_ADDR "	1b, 4b				\n"	\
695 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
696 	"	" __UA_ADDR "	2b, 4b				\n"	\
697 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
698 	"	.previous					\n"	\
699 	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
700 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
701 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
702 }
703 
704 /*
705  * Yuck.  We need two variants, one for 64bit operation and one
706  * for 32 bit mode and old iron.
707  */
708 #ifdef CONFIG_32BIT
709 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
710 #endif
711 #ifdef CONFIG_64BIT
712 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
713 #endif
714 
715 #define __put_user_unaligned_common(ptr, size)				\
716 do {									\
717 	switch (size) {							\
718 	case 1: __put_data_asm("sb", ptr); break;			\
719 	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
720 	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
721 	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
722 	default: __put_user_unaligned_unknown(); break;			\
	}								\
723 } while (0)
724 
725 #define __put_user_unaligned_nocheck(x,ptr,size)			\
726 ({									\
727 	__typeof__(*(ptr)) __pu_val;					\
728 	int __pu_err = 0;						\
729 									\
730 	__pu_val = (x);							\
731 	__put_user_unaligned_common(ptr, size);				\
732 	__pu_err;							\
733 })
734 
735 #define __put_user_unaligned_check(x,ptr,size)				\
736 ({									\
737 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
738 	__typeof__(*(ptr)) __pu_val = (x);				\
739 	int __pu_err = -EFAULT;						\
740 									\
741 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))		\
742 		__put_user_unaligned_common(__pu_addr, size);		\
743 									\
744 	__pu_err;							\
745 })
746 
747 #define __put_user_unaligned_asm(insn, ptr)				\
748 {									\
749 	__asm__ __volatile__(						\
750 	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
751 	"2:							\n"	\
752 	"	.insn						\n"	\
753 	"	.section	.fixup,\"ax\"			\n"	\
754 	"3:	li	%0, %4					\n"	\
755 	"	j	2b					\n"	\
756 	"	.previous					\n"	\
757 	"	.section	__ex_table,\"a\"		\n"	\
758 	"	" __UA_ADDR "	1b, 3b				\n"	\
759 	"	.previous					\n"	\
760 	: "=r" (__pu_err)						\
761 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
762 	  "i" (-EFAULT));						\
763 }
764 
765 #define __put_user_unaligned_asm_ll32(ptr)				\
766 {									\
767 	__asm__ __volatile__(						\
768 	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
769 	"2:	sw	%D2, 4(%3)				\n"	\
770 	"3:							\n"	\
771 	"	.insn						\n"	\
772 	"	.section	.fixup,\"ax\"			\n"	\
773 	"4:	li	%0, %4					\n"	\
774 	"	j	3b					\n"	\
775 	"	.previous					\n"	\
776 	"	.section	__ex_table,\"a\"		\n"	\
777 	"	" __UA_ADDR "	1b, 4b				\n"	\
778 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
779 	"	" __UA_ADDR "	2b, 4b				\n"	\
780 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
781 	"	.previous"						\
782 	: "=r" (__pu_err)						\
783 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
784 	  "i" (-EFAULT));						\
785 }
786 
787 extern void __put_user_unaligned_unknown(void);
788 #endif
789 
790 /*
791  * We're generating jumps to subroutines which may be outside the range of
792  * the jal instruction, so modules load the target address and use jalr.
793  */
794 #ifdef MODULE
795 #define __MODULE_JAL(destination)					\
796 	".set\tnoat\n\t"						\
797 	__UA_LA "\t$1, " #destination "\n\t"				\
798 	"jalr\t$1\n\t"							\
799 	".set\tat\n\t"
800 #else
801 #define __MODULE_JAL(destination)					\
802 	"jal\t" #destination "\n\t"
803 #endif
804 
805 #if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
806 					      defined(CONFIG_CPU_HAS_PREFETCH))
807 #define DADDI_SCRATCH "$3"
808 #else
809 #define DADDI_SCRATCH "$0"
810 #endif
811 
812 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
813 
814 #ifndef CONFIG_EVA
815 #define __invoke_copy_to_user(to, from, n)				\
816 ({									\
817 	register void __user *__cu_to_r __asm__("$4");			\
818 	register const void *__cu_from_r __asm__("$5");			\
819 	register long __cu_len_r __asm__("$6");				\
820 									\
821 	__cu_to_r = (to);						\
822 	__cu_from_r = (from);						\
823 	__cu_len_r = (n);						\
824 	__asm__ __volatile__(						\
825 	__MODULE_JAL(__copy_user)					\
826 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
827 	:								\
828 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
829 	  DADDI_SCRATCH, "memory");					\
830 	__cu_len_r;							\
831 })
832 
833 #define __invoke_copy_to_kernel(to, from, n)				\
834 	__invoke_copy_to_user(to, from, n)
835 
836 #endif
837 
838 /*
839  * __copy_to_user: - Copy a block of data into user space, with less checking.
840  * @to:	  Destination address, in user space.
841  * @from: Source address, in kernel space.
842  * @n:	  Number of bytes to copy.
843  *
844  * Context: User context only. This function may sleep if pagefaults are
845  *          enabled.
846  *
847  * Copy data from kernel space to user space.  Caller must check
848  * the specified block with access_ok() before calling this function.
849  *
850  * Returns number of bytes that could not be copied.
851  * On success, this will be zero.
852  */
853 #define __copy_to_user(to, from, n)					\
854 ({									\
855 	void __user *__cu_to;						\
856 	const void *__cu_from;						\
857 	long __cu_len;							\
858 									\
859 	__cu_to = (to);							\
860 	__cu_from = (from);						\
861 	__cu_len = (n);							\
862 									\
863 	check_object_size(__cu_from, __cu_len, true);			\
864 	might_fault();							\
865 									\
866 	if (eva_kernel_access())					\
867 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
868 						   __cu_len);		\
869 	else								\
870 		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
871 						 __cu_len);		\
872 	__cu_len;							\
873 })
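/*
 * Illustrative sketch, assuming the destination range has already passed
 * access_ok(); "ubuf", "kbuf" and "len" are placeholder names:
 *
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */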
874 
875 extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
876 
877 #define __copy_to_user_inatomic(to, from, n)				\
878 ({									\
879 	void __user *__cu_to;						\
880 	const void *__cu_from;						\
881 	long __cu_len;							\
882 									\
883 	__cu_to = (to);							\
884 	__cu_from = (from);						\
885 	__cu_len = (n);							\
886 									\
887 	check_object_size(__cu_from, __cu_len, true);			\
888 									\
889 	if (eva_kernel_access())					\
890 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
891 						   __cu_len);		\
892 	else								\
893 		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
894 						 __cu_len);		\
895 	__cu_len;							\
896 })
897 
898 #define __copy_from_user_inatomic(to, from, n)				\
899 ({									\
900 	void *__cu_to;							\
901 	const void __user *__cu_from;					\
902 	long __cu_len;							\
903 									\
904 	__cu_to = (to);							\
905 	__cu_from = (from);						\
906 	__cu_len = (n);							\
907 									\
908 	check_object_size(__cu_to, __cu_len, false);			\
909 									\
910 	if (eva_kernel_access())					\
911 		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
912 							      __cu_from,\
913 							      __cu_len);\
914 	else								\
915 		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
916 							    __cu_from,	\
917 							    __cu_len);	\
918 	__cu_len;							\
919 })
920 
921 /*
922  * copy_to_user: - Copy a block of data into user space.
923  * @to:	  Destination address, in user space.
924  * @from: Source address, in kernel space.
925  * @n:	  Number of bytes to copy.
926  *
927  * Context: User context only. This function may sleep if pagefaults are
928  *          enabled.
929  *
930  * Copy data from kernel space to user space.
931  *
932  * Returns number of bytes that could not be copied.
933  * On success, this will be zero.
934  */
935 #define copy_to_user(to, from, n)					\
936 ({									\
937 	void __user *__cu_to;						\
938 	const void *__cu_from;						\
939 	long __cu_len;							\
940 									\
941 	__cu_to = (to);							\
942 	__cu_from = (from);						\
943 	__cu_len = (n);							\
944 									\
945 	check_object_size(__cu_from, __cu_len, true);			\
946 									\
947 	if (eva_kernel_access()) {					\
948 		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
949 						   __cu_from,		\
950 						   __cu_len);		\
951 	} else {							\
952 		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
953 			might_fault();                                  \
954 			__cu_len = __invoke_copy_to_user(__cu_to,	\
955 							 __cu_from,	\
956 							 __cu_len);     \
957 		}							\
958 	}								\
959 	__cu_len;							\
960 })
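/*
 * Illustrative usage sketch; a nonzero return is the number of bytes that
 * could not be copied ("ubuf", "kbuf" and "len" are placeholders):
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */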
961 
962 #ifndef CONFIG_EVA
963 
964 #define __invoke_copy_from_user(to, from, n)				\
965 ({									\
966 	register void *__cu_to_r __asm__("$4");				\
967 	register const void __user *__cu_from_r __asm__("$5");		\
968 	register long __cu_len_r __asm__("$6");				\
969 									\
970 	__cu_to_r = (to);						\
971 	__cu_from_r = (from);						\
972 	__cu_len_r = (n);						\
973 	__asm__ __volatile__(						\
974 	".set\tnoreorder\n\t"						\
975 	__MODULE_JAL(__copy_user)					\
976 	".set\tnoat\n\t"						\
977 	__UA_ADDU "\t$1, %1, %2\n\t"					\
978 	".set\tat\n\t"							\
979 	".set\treorder"							\
980 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
981 	:								\
982 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
983 	  DADDI_SCRATCH, "memory");					\
984 	__cu_len_r;							\
985 })
986 
987 #define __invoke_copy_from_kernel(to, from, n)				\
988 	__invoke_copy_from_user(to, from, n)
989 
990 /* For userland <-> userland operations */
991 #define ___invoke_copy_in_user(to, from, n)				\
992 	__invoke_copy_from_user(to, from, n)
993 
994 /* For kernel <-> kernel operations */
995 #define ___invoke_copy_in_kernel(to, from, n)				\
996 	__invoke_copy_from_user(to, from, n)
997 
998 #define __invoke_copy_from_user_inatomic(to, from, n)			\
999 ({									\
1000 	register void *__cu_to_r __asm__("$4");				\
1001 	register const void __user *__cu_from_r __asm__("$5");		\
1002 	register long __cu_len_r __asm__("$6");				\
1003 									\
1004 	__cu_to_r = (to);						\
1005 	__cu_from_r = (from);						\
1006 	__cu_len_r = (n);						\
1007 	__asm__ __volatile__(						\
1008 	".set\tnoreorder\n\t"						\
1009 	__MODULE_JAL(__copy_user_inatomic)				\
1010 	".set\tnoat\n\t"						\
1011 	__UA_ADDU "\t$1, %1, %2\n\t"					\
1012 	".set\tat\n\t"							\
1013 	".set\treorder"							\
1014 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
1015 	:								\
1016 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
1017 	  DADDI_SCRATCH, "memory");					\
1018 	__cu_len_r;							\
1019 })
1020 
1021 #define __invoke_copy_from_kernel_inatomic(to, from, n)			\
1022 	__invoke_copy_from_user_inatomic(to, from, n)
1023 
1024 #else
1025 
1026 /* EVA specific functions */
1027 
1028 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
1029 				       size_t __n);
1030 extern size_t __copy_from_user_eva(void *__to, const void *__from,
1031 				   size_t __n);
1032 extern size_t __copy_to_user_eva(void *__to, const void *__from,
1033 				 size_t __n);
1034 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
1035 
1036 #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
1037 ({									\
1038 	register void *__cu_to_r __asm__("$4");				\
1039 	register const void __user *__cu_from_r __asm__("$5");		\
1040 	register long __cu_len_r __asm__("$6");				\
1041 									\
1042 	__cu_to_r = (to);						\
1043 	__cu_from_r = (from);						\
1044 	__cu_len_r = (n);						\
1045 	__asm__ __volatile__(						\
1046 	".set\tnoreorder\n\t"						\
1047 	__MODULE_JAL(func_ptr)						\
1048 	".set\tnoat\n\t"						\
1049 	__UA_ADDU "\t$1, %1, %2\n\t"					\
1050 	".set\tat\n\t"							\
1051 	".set\treorder"							\
1052 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
1053 	:								\
1054 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
1055 	  DADDI_SCRATCH, "memory");					\
1056 	__cu_len_r;							\
1057 })
1058 
1059 #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
1060 ({									\
1061 	register void *__cu_to_r __asm__("$4");				\
1062 	register const void __user *__cu_from_r __asm__("$5");		\
1063 	register long __cu_len_r __asm__("$6");				\
1064 									\
1065 	__cu_to_r = (to);						\
1066 	__cu_from_r = (from);						\
1067 	__cu_len_r = (n);						\
1068 	__asm__ __volatile__(						\
1069 	__MODULE_JAL(func_ptr)						\
1070 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
1071 	:								\
1072 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
1073 	  DADDI_SCRATCH, "memory");					\
1074 	__cu_len_r;							\
1075 })
1076 
1077 /*
1078  * Source or destination address is in userland. We need to go through
1079  * the TLB.
1080  */
1081 #define __invoke_copy_from_user(to, from, n)				\
1082 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
1083 
1084 #define __invoke_copy_from_user_inatomic(to, from, n)			\
1085 	__invoke_copy_from_user_eva_generic(to, from, n,		\
1086 					    __copy_user_inatomic_eva)
1087 
1088 #define __invoke_copy_to_user(to, from, n)				\
1089 	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
1090 
1091 #define ___invoke_copy_in_user(to, from, n)				\
1092 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
1093 
1094 /*
1095  * Source or destination address is in the kernel. We are not going through
1096  * the TLB.
1097  */
1098 #define __invoke_copy_from_kernel(to, from, n)				\
1099 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1100 
1101 #define __invoke_copy_from_kernel_inatomic(to, from, n)			\
1102 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
1103 
1104 #define __invoke_copy_to_kernel(to, from, n)				\
1105 	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
1106 
1107 #define ___invoke_copy_in_kernel(to, from, n)				\
1108 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1109 
1110 #endif /* CONFIG_EVA */
1111 
1112 /*
1113  * __copy_from_user: - Copy a block of data from user space, with less checking.
1114  * @to:	  Destination address, in kernel space.
1115  * @from: Source address, in user space.
1116  * @n:	  Number of bytes to copy.
1117  *
1118  * Context: User context only. This function may sleep if pagefaults are
1119  *          enabled.
1120  *
1121  * Copy data from user space to kernel space.  Caller must check
1122  * the specified block with access_ok() before calling this function.
1123  *
1124  * Returns number of bytes that could not be copied.
1125  * On success, this will be zero.
1126  *
1127  * If some data could not be copied, this function will pad the copied
1128  * data to the requested size using zero bytes.
1129  */
1130 #define __copy_from_user(to, from, n)					\
1131 ({									\
1132 	void *__cu_to;							\
1133 	const void __user *__cu_from;					\
1134 	long __cu_len;							\
1135 									\
1136 	__cu_to = (to);							\
1137 	__cu_from = (from);						\
1138 	__cu_len = (n);							\
1139 									\
1140 	check_object_size(__cu_to, __cu_len, false);			\
1141 									\
1142 	if (eva_kernel_access()) {					\
1143 		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
1144 						     __cu_from,		\
1145 						     __cu_len);		\
1146 	} else {							\
1147 		might_fault();						\
1148 		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
1149 						   __cu_len);		\
1150 	}								\
1151 	__cu_len;							\
1152 })
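/*
 * Illustrative sketch, assuming the source range has already passed
 * access_ok(); "kbuf", "ubuf" and "len" are placeholder names:
 *
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */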
1153 
1154 /*
1155  * copy_from_user: - Copy a block of data from user space.
1156  * @to:	  Destination address, in kernel space.
1157  * @from: Source address, in user space.
1158  * @n:	  Number of bytes to copy.
1159  *
1160  * Context: User context only. This function may sleep if pagefaults are
1161  *          enabled.
1162  *
1163  * Copy data from user space to kernel space.
1164  *
1165  * Returns number of bytes that could not be copied.
1166  * On success, this will be zero.
1167  *
1168  * If some data could not be copied, this function will pad the copied
1169  * data to the requested size using zero bytes.
1170  */
1171 #define copy_from_user(to, from, n)					\
1172 ({									\
1173 	void *__cu_to;							\
1174 	const void __user *__cu_from;					\
1175 	long __cu_len;							\
1176 									\
1177 	__cu_to = (to);							\
1178 	__cu_from = (from);						\
1179 	__cu_len = (n);							\
1180 									\
1181 	check_object_size(__cu_to, __cu_len, false);			\
1182 									\
1183 	if (eva_kernel_access()) {					\
1184 		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
1185 						     __cu_from,		\
1186 						     __cu_len);		\
1187 	} else {							\
1188 		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
1189 			might_fault();                                  \
1190 			__cu_len = __invoke_copy_from_user(__cu_to,	\
1191 							   __cu_from,	\
1192 							   __cu_len);   \
1193 		} else {						\
1194 			memset(__cu_to, 0, __cu_len);			\
1195 		}							\
1196 	}								\
1197 	__cu_len;							\
1198 })
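/*
 * Illustrative usage sketch; a nonzero return is the number of bytes left
 * uncopied ("kbuf", "ubuf" and "len" are placeholders):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */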
1199 
1200 #define __copy_in_user(to, from, n)					\
1201 ({									\
1202 	void __user *__cu_to;						\
1203 	const void __user *__cu_from;					\
1204 	long __cu_len;							\
1205 									\
1206 	__cu_to = (to);							\
1207 	__cu_from = (from);						\
1208 	__cu_len = (n);							\
1209 	if (eva_kernel_access()) {					\
1210 		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
1211 						    __cu_len);		\
1212 	} else {							\
1213 		might_fault();						\
1214 		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
1215 						  __cu_len);		\
1216 	}								\
1217 	__cu_len;							\
1218 })
1219 
1220 #define copy_in_user(to, from, n)					\
1221 ({									\
1222 	void __user *__cu_to;						\
1223 	const void __user *__cu_from;					\
1224 	long __cu_len;							\
1225 									\
1226 	__cu_to = (to);							\
1227 	__cu_from = (from);						\
1228 	__cu_len = (n);							\
1229 	if (eva_kernel_access()) {					\
1230 		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,	\
1231 						    __cu_len);		\
1232 	} else {							\
1233 		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
1234 			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
1235 			might_fault();					\
1236 			__cu_len = ___invoke_copy_in_user(__cu_to,	\
1237 							  __cu_from,	\
1238 							  __cu_len);	\
1239 		}							\
1240 	}								\
1241 	__cu_len;							\
1242 })
1243 
1244 extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
1245 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
1246 
1247 /*
1248  * __clear_user: - Zero a block of memory in user space, with less checking.
1249  * @to:	  Destination address, in user space.
1250  * @n:	  Number of bytes to zero.
1251  *
1252  * Zero a block of memory in user space.  Caller must check
1253  * the specified block with access_ok() before calling this function.
1254  *
1255  * Returns number of bytes that could not be cleared.
1256  * On success, this will be zero.
1257  */
1258 static inline __kernel_size_t
1259 __clear_user(void __user *addr, __kernel_size_t size)
1260 {
1261 	__kernel_size_t res;
1262 
1263 	if (eva_kernel_access()) {
1264 		__asm__ __volatile__(
1265 			"move\t$4, %1\n\t"
1266 			"move\t$5, $0\n\t"
1267 			"move\t$6, %2\n\t"
1268 			__MODULE_JAL(__bzero_kernel)
1269 			"move\t%0, $6"
1270 			: "=r" (res)
1271 			: "r" (addr), "r" (size)
1272 			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1273 	} else {
1274 		might_fault();
1275 		__asm__ __volatile__(
1276 			"move\t$4, %1\n\t"
1277 			"move\t$5, $0\n\t"
1278 			"move\t$6, %2\n\t"
1279 			__MODULE_JAL(__bzero)
1280 			"move\t%0, $6"
1281 			: "=r" (res)
1282 			: "r" (addr), "r" (size)
1283 			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1284 	}
1285 
1286 	return res;
1287 }
1288 
1289 #define clear_user(addr,n)						\
1290 ({									\
1291 	void __user * __cl_addr = (addr);				\
1292 	unsigned long __cl_size = (n);					\
1293 	if (__cl_size && access_ok(VERIFY_WRITE,			\
1294 					__cl_addr, __cl_size))		\
1295 		__cl_size = __clear_user(__cl_addr, __cl_size);		\
1296 	__cl_size;							\
1297 })
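/*
 * Illustrative usage sketch; "ubuf" and "len" are placeholder names:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */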
1298 
1299 extern long __strncpy_from_kernel_nocheck_asm(char *__to, const char __user *__from, long __len);
1300 extern long __strncpy_from_user_nocheck_asm(char *__to, const char __user *__from, long __len);
1301 
1302 /*
1303  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
1304  * @dst:   Destination address, in kernel space.  This buffer must be at
1305  *	   least @count bytes long.
1306  * @src:   Source address, in user space.
1307  * @count: Maximum number of bytes to copy, including the trailing NUL.
1308  *
1309  * Copies a NUL-terminated string from userspace to kernel space.
1310  * Caller must check the specified block with access_ok() before calling
1311  * this function.
1312  *
1313  * On success, returns the length of the string (not including the trailing
1314  * NUL).
1315  *
1316  * If access to userspace fails, returns -EFAULT (some data may have been
1317  * copied).
1318  *
1319  * If @count is smaller than the length of the string, copies @count bytes
1320  * and returns @count.
1321  */
1322 static inline long
1323 __strncpy_from_user(char *__to, const char __user *__from, long __len)
1324 {
1325 	long res;
1326 
1327 	if (eva_kernel_access()) {
1328 		__asm__ __volatile__(
1329 			"move\t$4, %1\n\t"
1330 			"move\t$5, %2\n\t"
1331 			"move\t$6, %3\n\t"
1332 			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1333 			"move\t%0, $2"
1334 			: "=r" (res)
1335 			: "r" (__to), "r" (__from), "r" (__len)
1336 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1337 	} else {
1338 		might_fault();
1339 		__asm__ __volatile__(
1340 			"move\t$4, %1\n\t"
1341 			"move\t$5, %2\n\t"
1342 			"move\t$6, %3\n\t"
1343 			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
1344 			"move\t%0, $2"
1345 			: "=r" (res)
1346 			: "r" (__to), "r" (__from), "r" (__len)
1347 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1348 	}
1349 
1350 	return res;
1351 }
1352 
1353 extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
1354 extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
1355 
1356 /*
1357  * strncpy_from_user: - Copy a NUL terminated string from userspace.
1358  * @dst:   Destination address, in kernel space.  This buffer must be at
1359  *	   least @count bytes long.
1360  * @src:   Source address, in user space.
1361  * @count: Maximum number of bytes to copy, including the trailing NUL.
1362  *
1363  * Copies a NUL-terminated string from userspace to kernel space.
1364  *
1365  * On success, returns the length of the string (not including the trailing
1366  * NUL).
1367  *
1368  * If access to userspace fails, returns -EFAULT (some data may have been
1369  * copied).
1370  *
1371  * If @count is smaller than the length of the string, copies @count bytes
1372  * and returns @count.
1373  */
1374 static inline long
1375 strncpy_from_user(char *__to, const char __user *__from, long __len)
1376 {
1377 	long res;
1378 
1379 	if (eva_kernel_access()) {
1380 		__asm__ __volatile__(
1381 			"move\t$4, %1\n\t"
1382 			"move\t$5, %2\n\t"
1383 			"move\t$6, %3\n\t"
1384 			__MODULE_JAL(__strncpy_from_kernel_asm)
1385 			"move\t%0, $2"
1386 			: "=r" (res)
1387 			: "r" (__to), "r" (__from), "r" (__len)
1388 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1389 	} else {
1390 		might_fault();
1391 		__asm__ __volatile__(
1392 			"move\t$4, %1\n\t"
1393 			"move\t$5, %2\n\t"
1394 			"move\t$6, %3\n\t"
1395 			__MODULE_JAL(__strncpy_from_user_asm)
1396 			"move\t%0, $2"
1397 			: "=r" (res)
1398 			: "r" (__to), "r" (__from), "r" (__len)
1399 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1400 	}
1401 
1402 	return res;
1403 }
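/*
 * Illustrative usage sketch; "name" and "uname" are placeholder names:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */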
1404 
1405 extern long __strlen_kernel_asm(const char __user *s);
1406 extern long __strlen_user_asm(const char __user *s);
1407 
1408 /*
1409  * strlen_user: - Get the size of a string in user space.
1410  * @str: The string to measure.
1411  *
1412  * Context: User context only. This function may sleep if pagefaults are
1413  *          enabled.
1414  *
1415  * Get the size of a NUL-terminated string in user space.
1416  *
1417  * Returns the size of the string INCLUDING the terminating NUL.
1418  * On exception, returns 0.
1419  *
1420  * If there is a limit on the length of a valid string, you may wish to
1421  * consider using strnlen_user() instead.
1422  */
1423 static inline long strlen_user(const char __user *s)
1424 {
1425 	long res;
1426 
1427 	if (eva_kernel_access()) {
1428 		__asm__ __volatile__(
1429 			"move\t$4, %1\n\t"
1430 			__MODULE_JAL(__strlen_kernel_asm)
1431 			"move\t%0, $2"
1432 			: "=r" (res)
1433 			: "r" (s)
1434 			: "$2", "$4", __UA_t0, "$31");
1435 	} else {
1436 		might_fault();
1437 		__asm__ __volatile__(
1438 			"move\t$4, %1\n\t"
1439 			__MODULE_JAL(__strlen_user_asm)
1440 			"move\t%0, $2"
1441 			: "=r" (res)
1442 			: "r" (s)
1443 			: "$2", "$4", __UA_t0, "$31");
1444 	}
1445 
1446 	return res;
1447 }
1448 
1449 extern long __strnlen_kernel_nocheck_asm(const char __user *s, long n);
1450 extern long __strnlen_user_nocheck_asm(const char __user *s, long n);
1451 
1452 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
1453 static inline long __strnlen_user(const char __user *s, long n)
1454 {
1455 	long res;
1456 
1457 	if (eva_kernel_access()) {
1458 		__asm__ __volatile__(
1459 			"move\t$4, %1\n\t"
1460 			"move\t$5, %2\n\t"
1461 			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
1462 			"move\t%0, $2"
1463 			: "=r" (res)
1464 			: "r" (s), "r" (n)
1465 			: "$2", "$4", "$5", __UA_t0, "$31");
1466 	} else {
1467 		might_fault();
1468 		__asm__ __volatile__(
1469 			"move\t$4, %1\n\t"
1470 			"move\t$5, %2\n\t"
1471 			__MODULE_JAL(__strnlen_user_nocheck_asm)
1472 			"move\t%0, $2"
1473 			: "=r" (res)
1474 			: "r" (s), "r" (n)
1475 			: "$2", "$4", "$5", __UA_t0, "$31");
1476 	}
1477 
1478 	return res;
1479 }
1480 
1481 extern long __strnlen_kernel_asm(const char __user *s, long n);
1482 extern long __strnlen_user_asm(const char __user *s, long n);
1483 
1484 /*
1485  * strnlen_user: - Get the size of a string in user space.
1486  * @str: The string to measure.
1487  *
1488  * Context: User context only. This function may sleep if pagefaults are
1489  *          enabled.
1490  *
1491  * Get the size of a NUL-terminated string in user space.
1492  *
1493  * Returns the size of the string INCLUDING the terminating NUL.
1494  * On exception, returns 0.
1495  * If the string is too long, returns a value greater than @n.
1496  */
1497 static inline long strnlen_user(const char __user *s, long n)
1498 {
1499 	long res;
1500 
1501 	might_fault();
1502 	if (eva_kernel_access()) {
1503 		__asm__ __volatile__(
1504 			"move\t$4, %1\n\t"
1505 			"move\t$5, %2\n\t"
1506 			__MODULE_JAL(__strnlen_kernel_asm)
1507 			"move\t%0, $2"
1508 			: "=r" (res)
1509 			: "r" (s), "r" (n)
1510 			: "$2", "$4", "$5", __UA_t0, "$31");
1511 	} else {
1512 		__asm__ __volatile__(
1513 			"move\t$4, %1\n\t"
1514 			"move\t$5, %2\n\t"
1515 			__MODULE_JAL(__strnlen_user_asm)
1516 			"move\t%0, $2"
1517 			: "=r" (res)
1518 			: "r" (s), "r" (n)
1519 			: "$2", "$4", "$5", __UA_t0, "$31");
1520 	}
1521 
1522 	return res;
1523 }
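/*
 * Illustrative usage sketch; "uname" is a placeholder and PATH_MAX an
 * arbitrary bound. A return of 0 means the access faulted, and a value
 * greater than the bound means the string was not terminated within it:
 *
 *	long len = strnlen_user(uname, PATH_MAX);
 *
 *	if (len == 0 || len > PATH_MAX)
 *		return -EFAULT;
 */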
1524 
1525 #endif /* _ASM_UACCESS_H */
1526