/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/asm-eva.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
#define USER_DS		((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

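/*
 * Illustrative sketch (added comment, not part of the original header):
 * kernel code that must pass a kernel pointer to a routine expecting a
 * __user pointer temporarily widens the address limit and restores it
 * afterwards.  The helper function shown here is hypothetical:
 *
 *	static long kernel_read_example(struct file *f, void *buf, size_t n)
 *	{
 *		mm_segment_t old_fs = get_fs();
 *		loff_t pos = 0;
 *		long ret;
 *
 *		set_fs(KERNEL_DS);		// bypass address checking
 *		ret = vfs_read(f, (char __user *)buf, n, &pos);
 *		set_fs(old_fs);			// always restore the old limit
 *		return ret;
 *	}
 */
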
#define segment_eq(a, b)	((a).seg == (b).seg)


/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *	  to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
({									\
	unsigned long __addr = (unsigned long) (addr);			\
	unsigned long __size = size;					\
	unsigned long __mask = mask;					\
	unsigned long __ok;						\
									\
	__chk_user_ptr(addr);						\
	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
		__ua_size(__size)));					\
	__ok == 0;							\
})

#define access_ok(type, addr, size)					\
	likely(__access_ok((addr), (size), __access_mask))
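
/*
 * Worked example (added for illustration): on a 32-bit non-KVM kernel with
 * get_fs() == USER_DS, the mask is 0x80000000.  For addr = 0x7ffffffc and
 * size = 8, addr + size = 0x80000004 has the high bit set, so the OR of
 * addr, addr + size and __ua_size(size) intersects the mask and access_ok()
 * correctly rejects a block that straddles the user/kernel boundary.  With
 * KERNEL_DS the mask is 0 and every address passes.
 */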

/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
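
/*
 * Usage sketch (added for illustration; the ioctl handler and argument
 * layout are hypothetical): get_user()/put_user() are the natural fit for
 * single scalars such as an ioctl argument.
 *
 *	static long example_ioctl(struct file *f, unsigned int cmd,
 *				  unsigned long arg)
 *	{
 *		int __user *uptr = (int __user *)arg;
 *		int val;
 *
 *		if (get_user(val, uptr))	// checks access and copies
 *			return -EFAULT;
 *		val *= 2;
 *		if (put_user(val, uptr))
 *			return -EFAULT;
 *		return 0;
 *	}
 */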

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
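
/*
 * Usage sketch (added for illustration; the structure is hypothetical):
 * the __-prefixed variants let a caller pay for access_ok() once and then
 * copy several fields cheaply.
 *
 *	struct example_req { int a; int b; };
 *
 *	static int read_req(struct example_req __user *u, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, u, sizeof(*u)))
 *			return -EFAULT;
 *		if (__get_user(*a, &u->a) || __get_user(*b, &u->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */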

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd			_loadw
#else
#define _loadd(reg, addr)	"ld " reg ", " addr
#endif
#define _loadw(reg, addr)	"lw " reg ", " addr
#define _loadh(reg, addr)	"lh " reg ", " addr
#define _loadb(reg, addr)	"lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, _loadb, ptr); break;		\
	case 2: __get_data_asm(val, _loadh, ptr); break;		\
	case 4: __get_data_asm(val, _loadw, ptr); break;		\
	case 8: __GET_DW(val, _loadd, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, user_lb, ptr); break;		\
	case 2: __get_data_asm(val, user_lh, ptr); break;		\
	case 4: __get_data_asm(val, user_lw, ptr); break;		\
	case 8: __GET_DW(val, user_ld, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	if (segment_eq(get_fs(), get_ds())) {				\
		__get_kernel_common((x), size, ptr);			\
	} else {							\
		__chk_user_ptr(ptr);					\
		__get_user_common((x), size, ptr);			\
	}								\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) {		\
		if (segment_eq(get_fs(), get_ds()))			\
			__get_kernel_common((x), size, __gu_ptr);	\
		else							\
			__get_user_common((x), size, __gu_ptr);		\
	} else								\
		(x) = 0;						\
									\
	__gu_err;							\
})

#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
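
/*
 * For illustration (added comment; the expansion below is schematic, not
 * compiler output): __get_data_asm(x, user_lw, p) emits roughly
 *
 *	1:	lw	reg, (p)	# may take a TLB/address error fault
 *	2:	...
 *		.section .fixup:     3: li err, -EFAULT; move reg, $0; j 2b
 *		.section __ex_table: .word 1b, 3b
 *
 * i.e. the __ex_table entry tells the fault handler that a fault at 1b
 * should resume at the fixup code 3b, which sets -EFAULT, zeroes the
 * destination register and jumps back past the load.
 */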

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"				\n"	\
	"2:	" insn("%D1", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel when operating in EVA mode. We use these macros to
 * avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored			_storew
#else
#define _stored(reg, addr)	"sd " reg ", " addr
#endif

#define _storew(reg, addr)	"sw " reg ", " addr
#define _storeh(reg, addr)	"sh " reg ", " addr
#define _storeb(reg, addr)	"sb " reg ", " addr

#define __put_kernel_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(_storeb, ptr); break;			\
	case 2: __put_data_asm(_storeh, ptr); break;			\
	case 4: __put_data_asm(_storew, ptr); break;			\
	case 8: __PUT_DW(_stored, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(user_sb, ptr); break;			\
	case 2: __put_data_asm(user_sh, ptr); break;			\
	case 4: __put_data_asm(user_sw, ptr); break;			\
	case 8: __PUT_DW(user_sd, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__put_kernel_common(ptr, size);				\
	} else {							\
		__chk_user_ptr(ptr);					\
		__put_user_common(ptr, size);				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {		\
		if (segment_eq(get_fs(), get_ds()))			\
			__put_kernel_common(__pu_addr, size);		\
		else							\
			__put_user_common(__pu_addr, size);		\
	}								\
									\
	__pu_err;							\
})

#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);

/*
 * ul{b,h,w} are macros and there are no equivalent macros for EVA.
 * EVA unaligned access is handled in the ADE exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr)	\
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
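
/*
 * Usage sketch (added for illustration; the packed structure is
 * hypothetical): the unaligned variants are for fields that may lack
 * their natural alignment, e.g. inside a packed on-wire header.
 *
 *	struct wire_hdr { u8 type; u32 seq; } __attribute__((packed));
 *
 *	static int read_seq(struct wire_hdr __user *h, u32 *seq)
 *	{
 *		// &h->seq is only byte-aligned, so a plain get_user()
 *		// could trap on CPUs without hardware unaligned loads.
 *		return get_user_unaligned(*seq, &h->seq);
 *	}
 */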

/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_data_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)			\
do {									\
	switch (size) {							\
	case 1: __get_data_unaligned_asm(val, "lb", ptr); break;	\
	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size)			\
({									\
	int __gu_err;							\
									\
	__get_user_unaligned_common((x), size, ptr);			\
	__gu_err;							\
})

#define __get_user_unaligned_check(x,ptr,size)				\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
		__get_user_unaligned_common((x), size, __gu_ptr);	\
									\
	__gu_err;							\
})

#define __get_data_unaligned_asm(val, insn, addr)			\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)			\
{									\
	unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	ulw	%1, (%3)				\n"	\
	"2:	ulw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size)				\
do {									\
	switch (size) {							\
	case 1: __put_user_unaligned_asm("sb", ptr); break;		\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
} while (0)

#define __put_user_unaligned_nocheck(x,ptr,size)			\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	__put_user_unaligned_common(ptr, size);				\
	__pu_err;							\
})

#define __put_user_unaligned_check(x,ptr,size)				\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size)))		\
		__put_user_unaligned_common(__pu_addr, size);		\
									\
	__pu_err;							\
})

#define __put_user_unaligned_asm(insn, ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_unaligned_asm_ll32(ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unaligned_unknown(void);
#endif

/*
 * We're generating jumps to subroutines which may be outside the range
 * of the jump instruction.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif
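
/*
 * For illustration (added comment): __MODULE_JAL(__copy_user) expands to
 * "jal __copy_user" when built into the kernel proper, but for modules,
 * which may be loaded outside the 256 MB segment reachable by jal, it
 * becomes roughly:
 *
 *	.set	noat
 *	la	$1, __copy_user		# dla on 64-bit kernels
 *	jalr	$1
 *	.set	at
 */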

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user(to, from, n)

#endif

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	if (segment_eq(get_fs(), get_ds()))				\
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
						   __cu_len);		\
	else								\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)				\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds()))				\
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
						   __cu_len);		\
	else								\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})

#define __copy_from_user_inatomic(to, from, n)				\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds()))				\
		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
							      __cu_from,\
							      __cu_len);\
	else								\
		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
							    __cu_from,	\
							    __cu_len);	\
	__cu_len;							\
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
						   __cu_from,		\
						   __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {	\
			might_fault();					\
			__cu_len = __invoke_copy_to_user(__cu_to,	\
							 __cu_from,	\
							 __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})
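
/*
 * Usage sketch (added for illustration; the read() handler is
 * hypothetical): returning a kernel buffer to userspace.
 *
 *	static ssize_t example_read(struct file *f, char __user *buf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		char kbuf[64];
 *		size_t n = min(count, sizeof(kbuf));
 *
 *		...fill kbuf...
 *		if (copy_to_user(buf, kbuf, n))	// nonzero => partial copy
 *			return -EFAULT;
 *		return n;
 *	}
 */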

#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_inatomic(to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
				       size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(func_ptr)						\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(func_ptr)						\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

/*
 * Source or destination address is in userland. We need to go through
 * the TLB.
 */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n,		\
					    __copy_user_inatomic_eva)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address is in the kernel. We do not go through
 * the TLB.
 */
#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#endif /* CONFIG_EVA */

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
			might_fault();					\
			__cu_len = __invoke_copy_from_user(__cu_to,	\
							   __cu_from,	\
							   __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})
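
/*
 * Usage sketch (added for illustration; the write() handler is
 * hypothetical): pulling a userspace buffer into the kernel.
 *
 *	static ssize_t example_write(struct file *f, const char __user *buf,
 *				     size_t count, loff_t *ppos)
 *	{
 *		char kbuf[64];
 *		size_t n = min(count, sizeof(kbuf) - 1);
 *
 *		if (copy_from_user(kbuf, buf, n))
 *			return -EFAULT;
 *		kbuf[n] = '\0';
 *		...parse kbuf...
 *		return count;
 *	}
 */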

#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		might_fault();						\
		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
						  __cu_len);		\
	}								\
	__cu_len;							\
})

#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
			might_fault();					\
			__cu_len = ___invoke_copy_in_user(__cu_to,	\
							  __cu_from,	\
							  __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:	  Destination address, in user space.
 * @n:	  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
					__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
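
/*
 * Usage sketch (added for illustration): zeroing a user buffer, e.g. the
 * tail of a partially filled block, before returning it to userspace.
 *
 *	if (clear_user(ubuf + used, len - used))
 *		return -EFAULT;		// nonzero => bytes left uncleared
 */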

/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}
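
/*
 * Usage sketch (added for illustration): fetching a short name string
 * from userspace into a fixed kernel buffer.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;		// -EFAULT
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	// no NUL within the buffer
 */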

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	}

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}
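
/*
 * Usage sketch (added for illustration): bounding a user string before
 * allocating space for it.
 *
 *	long n = strnlen_user(ustr, PATH_MAX);	// includes the NUL
 *
 *	if (n == 0)
 *		return -EFAULT;		// fault while scanning
 *	if (n > PATH_MAX)
 *		return -ENAMETOOLONG;
 */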

struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

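/*
 * Illustrative sketch (added comment; not necessarily the actual
 * implementation) of how the fault handler consumes the table: on a fault
 * inside one of the accessors above, it looks up the faulting PC and, if
 * an entry exists, resumes at the recorded fixup address instead of
 * killing the task.
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *fixup =
 *			search_exception_tables(exception_epc(regs));
 *
 *		if (fixup) {
 *			regs->cp0_epc = fixup->nextinsn;
 *			return 1;
 *		}
 *		return 0;
 *	}
 */
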
#endif /* _ASM_UACCESS_H */