xref: /openbmc/linux/arch/mips/include/asm/uaccess.h (revision a6ca5ac746d104019e76c29e69c2a1fc6dd2b29f)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13 
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <asm/asm-eva.h>
17 #include <asm/extable.h>
18 
19 /*
20  * The fs value determines whether argument validity checking should be
21  * performed or not.  If get_fs() == USER_DS, checking is performed; with
22  * get_fs() == KERNEL_DS, checking is bypassed.
23  *
24  * For historical reasons, these macros are grossly misnamed.
25  */
26 #ifdef CONFIG_32BIT
27 
28 #ifdef CONFIG_KVM_GUEST
29 #define __UA_LIMIT 0x40000000UL
30 #else
31 #define __UA_LIMIT 0x80000000UL
32 #endif
33 
34 #define __UA_ADDR	".word"
35 #define __UA_LA		"la"
36 #define __UA_ADDU	"addu"
37 #define __UA_t0		"$8"
38 #define __UA_t1		"$9"
39 
40 #endif /* CONFIG_32BIT */
41 
42 #ifdef CONFIG_64BIT
43 
44 extern u64 __ua_limit;
45 
46 #define __UA_LIMIT	__ua_limit
47 
48 #define __UA_ADDR	".dword"
49 #define __UA_LA		"dla"
50 #define __UA_ADDU	"daddu"
51 #define __UA_t0		"$12"
52 #define __UA_t1		"$13"
53 
54 #endif /* CONFIG_64BIT */
55 
56 /*
57  * USER_DS is a bitmask that has the bits set that may not be set in a valid
58  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
59  * the arithmetic we're doing only works if the limit is a power of two, so
60  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
61  * address in this range it's the process's problem, not ours :-)
62  */
63 
64 #ifdef CONFIG_KVM_GUEST
65 #define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
66 #define USER_DS		((mm_segment_t) { 0xC0000000UL })
67 #else
68 #define KERNEL_DS	((mm_segment_t) { 0UL })
69 #define USER_DS		((mm_segment_t) { __UA_LIMIT })
70 #endif
71 
72 #define get_ds()	(KERNEL_DS)
73 #define get_fs()	(current_thread_info()->addr_limit)
74 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
75 
76 #define segment_eq(a, b)	((a).seg == (b).seg)
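
/*
 * Illustrative only, not part of this header: the classic pattern used by
 * callers of get_fs()/set_fs() to temporarily lift the address limit so the
 * uaccess helpers accept kernel pointers (foo_read_file() and its arguments
 * are hypothetical).  The old limit must always be restored:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = foo_read_file(kernel_buf, len);
 *	set_fs(old_fs);
 */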
77 
78 /*
79  * eva_kernel_access() - determine whether kernel memory access on an EVA system
80  *
81  * Determines whether memory accesses should be performed to kernel memory
82  * on a system using Extended Virtual Addressing (EVA).
83  *
84  * Return: true if a kernel memory access on an EVA system, else false.
85  */
86 static inline bool eva_kernel_access(void)
87 {
88 	if (!IS_ENABLED(CONFIG_EVA))
89 		return false;
90 
91 	return uaccess_kernel();
92 }
93 
94 /*
95  * Is an address valid? This does a straightforward calculation rather
96  * than tests.
97  *
98  * Address valid if:
99  *  - "addr" doesn't have any high-bits set
100  *  - AND "size" doesn't have any high-bits set
101  *  - AND "addr+size" doesn't have any high-bits set
102  *  - OR we are in kernel mode.
103  *
104  * __ua_size() is a trick to avoid runtime checking of positive constant
105  * sizes; for those we already know at compile time that the size is ok.
106  */
107 #define __ua_size(size)							\
108 	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
109 
110 /*
111  * access_ok: - Checks if a user space pointer is valid
112  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
113  *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
114  *	  to write to a block, it is always safe to read from it.
115  * @addr: User space pointer to start of block to check
116  * @size: Size of block to check
117  *
118  * Context: User context only. This function may sleep if pagefaults are
119  *          enabled.
120  *
121  * Checks if a pointer to a block of memory in user space is valid.
122  *
123  * Returns true (nonzero) if the memory block may be valid, false (zero)
124  * if it is definitely invalid.
125  *
126  * Note that, depending on architecture, this function probably just
127  * checks that the pointer is in the user space range - after calling
128  * this function, memory access functions may still return -EFAULT.
129  */
130 
131 static inline int __access_ok(const void __user *p, unsigned long size)
132 {
133 	unsigned long addr = (unsigned long)p;
134 	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
135 }
136 
137 #define access_ok(type, addr, size)					\
138 	likely(__access_ok((addr), (size)))
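
/*
 * Illustrative use, not part of this header ('ubuf' and 'len' are
 * hypothetical): reject a bad user range up front, before performing any
 * unchecked accesses to it.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */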
139 
140 /*
141  * put_user: - Write a simple value into user space.
142  * @x:	 Value to copy to user space.
143  * @ptr: Destination address, in user space.
144  *
145  * Context: User context only. This function may sleep if pagefaults are
146  *          enabled.
147  *
148  * This macro copies a single simple value from kernel space to user
149  * space.  It supports simple types like char and int, but not larger
150  * data types like structures or arrays.
151  *
152  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
153  * to the result of dereferencing @ptr.
154  *
155  * Returns zero on success, or -EFAULT on error.
156  */
157 #define put_user(x,ptr) \
158 	__put_user_check((x), (ptr), sizeof(*(ptr)))
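
/*
 * Illustrative use, not part of this header ('foo' and 'arg' are
 * hypothetical driver state):
 *
 *	int __user *uptr = (int __user *)arg;
 *
 *	if (put_user(foo->count, uptr))
 *		return -EFAULT;
 */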
159 
160 /*
161  * get_user: - Get a simple variable from user space.
162  * @x:	 Variable to store result.
163  * @ptr: Source address, in user space.
164  *
165  * Context: User context only. This function may sleep if pagefaults are
166  *          enabled.
167  *
168  * This macro copies a single simple variable from user space to kernel
169  * space.  It supports simple types like char and int, but not larger
170  * data types like structures or arrays.
171  *
172  * @ptr must have pointer-to-simple-variable type, and the result of
173  * dereferencing @ptr must be assignable to @x without a cast.
174  *
175  * Returns zero on success, or -EFAULT on error.
176  * On error, the variable @x is set to zero.
177  */
178 #define get_user(x,ptr) \
179 	__get_user_check((x), (ptr), sizeof(*(ptr)))
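
/*
 * Illustrative use, not part of this header (foo_set_threshold() and 'arg'
 * are hypothetical).  On a fault get_user() returns -EFAULT and 'value' is
 * zeroed:
 *
 *	int __user *uptr = (int __user *)arg;
 *	int value;
 *
 *	if (get_user(value, uptr))
 *		return -EFAULT;
 *	foo_set_threshold(value);
 */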
180 
181 /*
182  * __put_user: - Write a simple value into user space, with less checking.
183  * @x:	 Value to copy to user space.
184  * @ptr: Destination address, in user space.
185  *
186  * Context: User context only. This function may sleep if pagefaults are
187  *          enabled.
188  *
189  * This macro copies a single simple value from kernel space to user
190  * space.  It supports simple types like char and int, but not larger
191  * data types like structures or arrays.
192  *
193  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
194  * to the result of dereferencing @ptr.
195  *
196  * Caller must check the pointer with access_ok() before calling this
197  * function.
198  *
199  * Returns zero on success, or -EFAULT on error.
200  */
201 #define __put_user(x,ptr) \
202 	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
203 
204 /*
205  * __get_user: - Get a simple variable from user space, with less checking.
206  * @x:	 Variable to store result.
207  * @ptr: Source address, in user space.
208  *
209  * Context: User context only. This function may sleep if pagefaults are
210  *          enabled.
211  *
212  * This macro copies a single simple variable from user space to kernel
213  * space.  It supports simple types like char and int, but not larger
214  * data types like structures or arrays.
215  *
216  * @ptr must have pointer-to-simple-variable type, and the result of
217  * dereferencing @ptr must be assignable to @x without a cast.
218  *
219  * Caller must check the pointer with access_ok() before calling this
220  * function.
221  *
222  * Returns zero on success, or -EFAULT on error.
223  * On error, the variable @x is set to zero.
224  */
225 #define __get_user(x,ptr) \
226 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
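
/*
 * Illustrative sketch, not part of this header (struct foo_rect, 'arg' and
 * the field names are hypothetical): after a single access_ok() check, the
 * unchecked variants can be used for several fields of the same object.
 *
 *	struct foo_rect __user *uarg = (struct foo_rect __user *)arg;
 *	int err, width, height;
 *
 *	if (!access_ok(VERIFY_WRITE, uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	err  = __get_user(width, &uarg->width);
 *	err |= __get_user(height, &uarg->height);
 *	err |= __put_user(width * height, &uarg->pixels);
 *	if (err)
 *		return -EFAULT;
 */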
227 
228 struct __large_struct { unsigned long buf[100]; };
229 #define __m(x) (*(struct __large_struct __user *)(x))
230 
231 /*
232  * Yuck.  We need two variants, one for 64bit operation and one
233  * for 32 bit mode and old iron.
234  */
235 #ifndef CONFIG_EVA
236 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
237 #else
238 /*
239  * Kernel specific functions for EVA. We need to use normal load instructions
240  * to read data from kernel when operating in EVA mode. We use these macros to
241  * avoid redefining __get_user_asm for EVA.
242  */
243 #undef _loadd
244 #undef _loadw
245 #undef _loadh
246 #undef _loadb
247 #ifdef CONFIG_32BIT
248 #define _loadd			_loadw
249 #else
250 #define _loadd(reg, addr)	"ld " reg ", " addr
251 #endif
252 #define _loadw(reg, addr)	"lw " reg ", " addr
253 #define _loadh(reg, addr)	"lh " reg ", " addr
254 #define _loadb(reg, addr)	"lb " reg ", " addr
255 
256 #define __get_kernel_common(val, size, ptr)				\
257 do {									\
258 	switch (size) {							\
259 	case 1: __get_data_asm(val, _loadb, ptr); break;		\
260 	case 2: __get_data_asm(val, _loadh, ptr); break;		\
261 	case 4: __get_data_asm(val, _loadw, ptr); break;		\
262 	case 8: __GET_DW(val, _loadd, ptr); break;			\
263 	default: __get_user_unknown(); break;				\
264 	}								\
265 } while (0)
266 #endif
267 
268 #ifdef CONFIG_32BIT
269 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
270 #endif
271 #ifdef CONFIG_64BIT
272 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
273 #endif
274 
275 extern void __get_user_unknown(void);
276 
277 #define __get_user_common(val, size, ptr)				\
278 do {									\
279 	switch (size) {							\
280 	case 1: __get_data_asm(val, user_lb, ptr); break;		\
281 	case 2: __get_data_asm(val, user_lh, ptr); break;		\
282 	case 4: __get_data_asm(val, user_lw, ptr); break;		\
283 	case 8: __GET_DW(val, user_ld, ptr); break;			\
284 	default: __get_user_unknown(); break;				\
285 	}								\
286 } while (0)
287 
288 #define __get_user_nocheck(x, ptr, size)				\
289 ({									\
290 	int __gu_err;							\
291 									\
292 	if (eva_kernel_access()) {					\
293 		__get_kernel_common((x), size, ptr);			\
294 	} else {							\
295 		__chk_user_ptr(ptr);					\
296 		__get_user_common((x), size, ptr);			\
297 	}								\
298 	__gu_err;							\
299 })
300 
301 #define __get_user_check(x, ptr, size)					\
302 ({									\
303 	int __gu_err = -EFAULT;						\
304 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
305 									\
306 	might_fault();							\
307 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {		\
308 		if (eva_kernel_access())				\
309 			__get_kernel_common((x), size, __gu_ptr);	\
310 		else							\
311 			__get_user_common((x), size, __gu_ptr);		\
312 	} else								\
313 		(x) = 0;						\
314 									\
315 	__gu_err;							\
316 })
317 
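/*
 * The accessors below rely on the kernel exception table: label "1:" marks
 * the load/store that may fault, and the __ex_table entry pairs its address
 * with the fixup code in .fixup, which loads -EFAULT into the error operand
 * (and zeroes the destination of a load) before jumping back past the
 * faulting instruction.  The fault handler finds the fixup via
 * fixup_exception() (see <asm/extable.h>) instead of oopsing.
 */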
318 #define __get_data_asm(val, insn, addr)					\
319 {									\
320 	long __gu_tmp;							\
321 									\
322 	__asm__ __volatile__(						\
323 	"1:	"insn("%1", "%3")"				\n"	\
324 	"2:							\n"	\
325 	"	.insn						\n"	\
326 	"	.section .fixup,\"ax\"				\n"	\
327 	"3:	li	%0, %4					\n"	\
328 	"	move	%1, $0					\n"	\
329 	"	j	2b					\n"	\
330 	"	.previous					\n"	\
331 	"	.section __ex_table,\"a\"			\n"	\
332 	"	"__UA_ADDR "\t1b, 3b				\n"	\
333 	"	.previous					\n"	\
334 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
335 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
336 									\
337 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
338 }
339 
340 /*
341  * Get a long long 64 using 32 bit registers.
342  */
343 #define __get_data_asm_ll32(val, insn, addr)				\
344 {									\
345 	union {								\
346 		unsigned long long	l;				\
347 		__typeof__(*(addr))	t;				\
348 	} __gu_tmp;							\
349 									\
350 	__asm__ __volatile__(						\
351 	"1:	" insn("%1", "(%3)")"				\n"	\
352 	"2:	" insn("%D1", "4(%3)")"				\n"	\
353 	"3:							\n"	\
354 	"	.insn						\n"	\
355 	"	.section	.fixup,\"ax\"			\n"	\
356 	"4:	li	%0, %4					\n"	\
357 	"	move	%1, $0					\n"	\
358 	"	move	%D1, $0					\n"	\
359 	"	j	3b					\n"	\
360 	"	.previous					\n"	\
361 	"	.section	__ex_table,\"a\"		\n"	\
362 	"	" __UA_ADDR "	1b, 4b				\n"	\
363 	"	" __UA_ADDR "	2b, 4b				\n"	\
364 	"	.previous					\n"	\
365 	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
366 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
367 									\
368 	(val) = __gu_tmp.t;						\
369 }
370 
371 #ifndef CONFIG_EVA
372 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
373 #else
374 /*
375  * Kernel specific functions for EVA. We need to use normal store instructions
376  * to write data to kernel when operating in EVA mode. We use these macros to
377  * avoid redefining __put_data_asm for EVA.
378  */
379 #undef _stored
380 #undef _storew
381 #undef _storeh
382 #undef _storeb
383 #ifdef CONFIG_32BIT
384 #define _stored			_storew
385 #else
386 #define _stored(reg, addr)	"sd " reg ", " addr
387 #endif
388 
389 #define _storew(reg, addr)	"sw " reg ", " addr
390 #define _storeh(reg, addr)	"sh " reg ", " addr
391 #define _storeb(reg, addr)	"sb " reg ", " addr
392 
393 #define __put_kernel_common(ptr, size)					\
394 do {									\
395 	switch (size) {							\
396 	case 1: __put_data_asm(_storeb, ptr); break;			\
397 	case 2: __put_data_asm(_storeh, ptr); break;			\
398 	case 4: __put_data_asm(_storew, ptr); break;			\
399 	case 8: __PUT_DW(_stored, ptr); break;				\
400 	default: __put_user_unknown(); break;				\
401 	}								\
402 } while (0)
403 #endif
404 
405 /*
406  * Yuck.  We need two variants, one for 64bit operation and one
407  * for 32 bit mode and old iron.
408  */
409 #ifdef CONFIG_32BIT
410 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
411 #endif
412 #ifdef CONFIG_64BIT
413 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
414 #endif
415 
416 #define __put_user_common(ptr, size)					\
417 do {									\
418 	switch (size) {							\
419 	case 1: __put_data_asm(user_sb, ptr); break;			\
420 	case 2: __put_data_asm(user_sh, ptr); break;			\
421 	case 4: __put_data_asm(user_sw, ptr); break;			\
422 	case 8: __PUT_DW(user_sd, ptr); break;				\
423 	default: __put_user_unknown(); break;				\
424 	}								\
425 } while (0)
426 
427 #define __put_user_nocheck(x, ptr, size)				\
428 ({									\
429 	__typeof__(*(ptr)) __pu_val;					\
430 	int __pu_err = 0;						\
431 									\
432 	__pu_val = (x);							\
433 	if (eva_kernel_access()) {					\
434 		__put_kernel_common(ptr, size);				\
435 	} else {							\
436 		__chk_user_ptr(ptr);					\
437 		__put_user_common(ptr, size);				\
438 	}								\
439 	__pu_err;							\
440 })
441 
442 #define __put_user_check(x, ptr, size)					\
443 ({									\
444 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
445 	__typeof__(*(ptr)) __pu_val = (x);				\
446 	int __pu_err = -EFAULT;						\
447 									\
448 	might_fault();							\
449 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
450 		if (eva_kernel_access())				\
451 			__put_kernel_common(__pu_addr, size);		\
452 		else							\
453 			__put_user_common(__pu_addr, size);		\
454 	}								\
455 									\
456 	__pu_err;							\
457 })
458 
459 #define __put_data_asm(insn, ptr)					\
460 {									\
461 	__asm__ __volatile__(						\
462 	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
463 	"2:							\n"	\
464 	"	.insn						\n"	\
465 	"	.section	.fixup,\"ax\"			\n"	\
466 	"3:	li	%0, %4					\n"	\
467 	"	j	2b					\n"	\
468 	"	.previous					\n"	\
469 	"	.section	__ex_table,\"a\"		\n"	\
470 	"	" __UA_ADDR "	1b, 3b				\n"	\
471 	"	.previous					\n"	\
472 	: "=r" (__pu_err)						\
473 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
474 	  "i" (-EFAULT));						\
475 }
476 
477 #define __put_data_asm_ll32(insn, ptr)					\
478 {									\
479 	__asm__ __volatile__(						\
480 	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
481 	"2:	"insn("%D2", "4(%3)")"				\n"	\
482 	"3:							\n"	\
483 	"	.insn						\n"	\
484 	"	.section	.fixup,\"ax\"			\n"	\
485 	"4:	li	%0, %4					\n"	\
486 	"	j	3b					\n"	\
487 	"	.previous					\n"	\
488 	"	.section	__ex_table,\"a\"		\n"	\
489 	"	" __UA_ADDR "	1b, 4b				\n"	\
490 	"	" __UA_ADDR "	2b, 4b				\n"	\
491 	"	.previous"						\
492 	: "=r" (__pu_err)						\
493 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
494 	  "i" (-EFAULT));						\
495 }
496 
497 extern void __put_user_unknown(void);
498 
499 /*
500  * ul{b,h,w} are macros and there are no equivalent macros for EVA.
501  * EVA unaligned access is handled in the ADE exception handler.
502  */
503 #ifndef CONFIG_EVA
504 /*
505  * put_user_unaligned: - Write a simple value into user space.
506  * @x:	 Value to copy to user space.
507  * @ptr: Destination address, in user space.
508  *
509  * Context: User context only. This function may sleep if pagefaults are
510  *          enabled.
511  *
512  * This macro copies a single simple value from kernel space to user
513  * space.  It supports simple types like char and int, but not larger
514  * data types like structures or arrays.
515  *
516  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
517  * to the result of dereferencing @ptr.
518  *
519  * Returns zero on success, or -EFAULT on error.
520  */
521 #define put_user_unaligned(x,ptr)	\
522 	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
523 
524 /*
525  * get_user_unaligned: - Get a simple variable from user space.
526  * @x:	 Variable to store result.
527  * @ptr: Source address, in user space.
528  *
529  * Context: User context only. This function may sleep if pagefaults are
530  *          enabled.
531  *
532  * This macro copies a single simple variable from user space to kernel
533  * space.  It supports simple types like char and int, but not larger
534  * data types like structures or arrays.
535  *
536  * @ptr must have pointer-to-simple-variable type, and the result of
537  * dereferencing @ptr must be assignable to @x without a cast.
538  *
539  * Returns zero on success, or -EFAULT on error.
540  * On error, the variable @x is set to zero.
541  */
542 #define get_user_unaligned(x,ptr) \
543 	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
544 
545 /*
546  * __put_user_unaligned: - Write a simple value into user space, with less checking.
547  * @x:	 Value to copy to user space.
548  * @ptr: Destination address, in user space.
549  *
550  * Context: User context only. This function may sleep if pagefaults are
551  *          enabled.
552  *
553  * This macro copies a single simple value from kernel space to user
554  * space.  It supports simple types like char and int, but not larger
555  * data types like structures or arrays.
556  *
557  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
558  * to the result of dereferencing @ptr.
559  *
560  * Caller must check the pointer with access_ok() before calling this
561  * function.
562  *
563  * Returns zero on success, or -EFAULT on error.
564  */
565 #define __put_user_unaligned(x,ptr) \
566 	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
567 
568 /*
569  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
570  * @x:	 Variable to store result.
571  * @ptr: Source address, in user space.
572  *
573  * Context: User context only. This function may sleep if pagefaults are
574  *          enabled.
575  *
576  * This macro copies a single simple variable from user space to kernel
577  * space.  It supports simple types like char and int, but not larger
578  * data types like structures or arrays.
579  *
580  * @ptr must have pointer-to-simple-variable type, and the result of
581  * dereferencing @ptr must be assignable to @x without a cast.
582  *
583  * Caller must check the pointer with access_ok() before calling this
584  * function.
585  *
586  * Returns zero on success, or -EFAULT on error.
587  * On error, the variable @x is set to zero.
588  */
589 #define __get_user_unaligned(x,ptr) \
590 	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
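
/*
 * Illustrative use, not part of this header ('uhdr' and its 'seq' field are
 * hypothetical): read a 32-bit value from a packed, potentially misaligned
 * user structure.
 *
 *	u32 seq;
 *
 *	if (get_user_unaligned(seq, &uhdr->seq))
 *		return -EFAULT;
 */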
591 
592 /*
593  * Yuck.  We need two variants, one for 64bit operation and one
594  * for 32 bit mode and old iron.
595  */
596 #ifdef CONFIG_32BIT
597 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
598 	__get_user_unaligned_asm_ll32(val, ptr)
599 #endif
600 #ifdef CONFIG_64BIT
601 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
602 	__get_user_unaligned_asm(val, "uld", ptr)
603 #endif
604 
605 extern void __get_user_unaligned_unknown(void);
606 
607 #define __get_user_unaligned_common(val, size, ptr)			\
608 do {									\
609 	switch (size) {							\
610 	case 1: __get_data_asm(val, "lb", ptr); break;			\
611 	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
612 	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
613 	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
614 	default: __get_user_unaligned_unknown(); break;			\
615 	}								\
616 } while (0)
617 
618 #define __get_user_unaligned_nocheck(x,ptr,size)			\
619 ({									\
620 	int __gu_err;							\
621 									\
622 	__get_user_unaligned_common((x), size, ptr);			\
623 	__gu_err;							\
624 })
625 
626 #define __get_user_unaligned_check(x,ptr,size)				\
627 ({									\
628 	int __gu_err = -EFAULT;						\
629 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
630 									\
631 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
632 		__get_user_unaligned_common((x), size, __gu_ptr);	\
633 									\
634 	__gu_err;							\
635 })
636 
637 #define __get_data_unaligned_asm(val, insn, addr)			\
638 {									\
639 	long __gu_tmp;							\
640 									\
641 	__asm__ __volatile__(						\
642 	"1:	" insn "	%1, %3				\n"	\
643 	"2:							\n"	\
644 	"	.insn						\n"	\
645 	"	.section .fixup,\"ax\"				\n"	\
646 	"3:	li	%0, %4					\n"	\
647 	"	move	%1, $0					\n"	\
648 	"	j	2b					\n"	\
649 	"	.previous					\n"	\
650 	"	.section __ex_table,\"a\"			\n"	\
651 	"	"__UA_ADDR "\t1b, 3b				\n"	\
652 	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
653 	"	.previous					\n"	\
654 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
655 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
656 									\
657 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
658 }
659 
660 /*
661  * Get a long long 64 using 32 bit registers.
662  */
663 #define __get_user_unaligned_asm_ll32(val, addr)			\
664 {									\
665 	unsigned long long __gu_tmp;					\
666 									\
667 	__asm__ __volatile__(						\
668 	"1:	ulw	%1, (%3)				\n"	\
669 	"2:	ulw	%D1, 4(%3)				\n"	\
670 	"	move	%0, $0					\n"	\
671 	"3:							\n"	\
672 	"	.insn						\n"	\
673 	"	.section	.fixup,\"ax\"			\n"	\
674 	"4:	li	%0, %4					\n"	\
675 	"	move	%1, $0					\n"	\
676 	"	move	%D1, $0					\n"	\
677 	"	j	3b					\n"	\
678 	"	.previous					\n"	\
679 	"	.section	__ex_table,\"a\"		\n"	\
680 	"	" __UA_ADDR "	1b, 4b				\n"	\
681 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
682 	"	" __UA_ADDR "	2b, 4b				\n"	\
683 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
684 	"	.previous					\n"	\
685 	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
686 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
687 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
688 }
689 
690 /*
691  * Yuck.  We need two variants, one for 64bit operation and one
692  * for 32 bit mode and old iron.
693  */
694 #ifdef CONFIG_32BIT
695 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
696 #endif
697 #ifdef CONFIG_64BIT
698 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
699 #endif
700 
701 #define __put_user_unaligned_common(ptr, size)				\
702 do {									\
703 	switch (size) {							\
704 	case 1: __put_data_asm("sb", ptr); break;			\
705 	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
706 	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
707 	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
708 	default: __put_user_unaligned_unknown(); break;			\
	}								\
709 } while (0)
710 
711 #define __put_user_unaligned_nocheck(x,ptr,size)			\
712 ({									\
713 	__typeof__(*(ptr)) __pu_val;					\
714 	int __pu_err = 0;						\
715 									\
716 	__pu_val = (x);							\
717 	__put_user_unaligned_common(ptr, size);				\
718 	__pu_err;							\
719 })
720 
721 #define __put_user_unaligned_check(x,ptr,size)				\
722 ({									\
723 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
724 	__typeof__(*(ptr)) __pu_val = (x);				\
725 	int __pu_err = -EFAULT;						\
726 									\
727 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))		\
728 		__put_user_unaligned_common(__pu_addr, size);		\
729 									\
730 	__pu_err;							\
731 })
732 
733 #define __put_user_unaligned_asm(insn, ptr)				\
734 {									\
735 	__asm__ __volatile__(						\
736 	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
737 	"2:							\n"	\
738 	"	.insn						\n"	\
739 	"	.section	.fixup,\"ax\"			\n"	\
740 	"3:	li	%0, %4					\n"	\
741 	"	j	2b					\n"	\
742 	"	.previous					\n"	\
743 	"	.section	__ex_table,\"a\"		\n"	\
744 	"	" __UA_ADDR "	1b, 3b				\n"	\
745 	"	.previous					\n"	\
746 	: "=r" (__pu_err)						\
747 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
748 	  "i" (-EFAULT));						\
749 }
750 
751 #define __put_user_unaligned_asm_ll32(ptr)				\
752 {									\
753 	__asm__ __volatile__(						\
754 	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
755 	"2:	sw	%D2, 4(%3)				\n"	\
756 	"3:							\n"	\
757 	"	.insn						\n"	\
758 	"	.section	.fixup,\"ax\"			\n"	\
759 	"4:	li	%0, %4					\n"	\
760 	"	j	3b					\n"	\
761 	"	.previous					\n"	\
762 	"	.section	__ex_table,\"a\"		\n"	\
763 	"	" __UA_ADDR "	1b, 4b				\n"	\
764 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
765 	"	" __UA_ADDR "	2b, 4b				\n"	\
766 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
767 	"	.previous"						\
768 	: "=r" (__pu_err)						\
769 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
770 	  "i" (-EFAULT));						\
771 }
772 
773 extern void __put_user_unaligned_unknown(void);
774 #endif
775 
776 /*
777  * We're generating jumps to subroutines which will be outside the range of
778  * the jump instructions.
779  */
780 #ifdef MODULE
781 #define __MODULE_JAL(destination)					\
782 	".set\tnoat\n\t"						\
783 	__UA_LA "\t$1, " #destination "\n\t"				\
784 	"jalr\t$1\n\t"							\
785 	".set\tat\n\t"
786 #else
787 #define __MODULE_JAL(destination)					\
788 	"jal\t" #destination "\n\t"
789 #endif
790 
791 #if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
792 					      defined(CONFIG_CPU_HAS_PREFETCH))
793 #define DADDI_SCRATCH "$3"
794 #else
795 #define DADDI_SCRATCH "$0"
796 #endif
797 
798 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
799 
800 #define __invoke_copy_from(func, to, from, n)				\
801 ({									\
802 	register void *__cu_to_r __asm__("$4");				\
803 	register const void __user *__cu_from_r __asm__("$5");		\
804 	register long __cu_len_r __asm__("$6");				\
805 									\
806 	__cu_to_r = (to);						\
807 	__cu_from_r = (from);						\
808 	__cu_len_r = (n);						\
809 	__asm__ __volatile__(						\
810 	".set\tnoreorder\n\t"						\
811 	__MODULE_JAL(func)						\
812 	".set\tnoat\n\t"						\
813 	__UA_ADDU "\t$1, %1, %2\n\t"					\
814 	".set\tat\n\t"							\
815 	".set\treorder"							\
816 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
817 	:								\
818 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
819 	  DADDI_SCRATCH, "memory");					\
820 	__cu_len_r;							\
821 })
822 
823 #define __invoke_copy_to(func, to, from, n)				\
824 ({									\
825 	register void __user *__cu_to_r __asm__("$4");			\
826 	register const void *__cu_from_r __asm__("$5");			\
827 	register long __cu_len_r __asm__("$6");				\
828 									\
829 	__cu_to_r = (to);						\
830 	__cu_from_r = (from);						\
831 	__cu_len_r = (n);						\
832 	__asm__ __volatile__(						\
833 	__MODULE_JAL(func)						\
834 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
835 	:								\
836 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
837 	  DADDI_SCRATCH, "memory");					\
838 	__cu_len_r;							\
839 })
840 
841 #define __invoke_copy_from_kernel(to, from, n)				\
842 	__invoke_copy_from(__copy_user, to, from, n)
843 
844 #define __invoke_copy_to_kernel(to, from, n)				\
845 	__invoke_copy_to(__copy_user, to, from, n)
846 
847 #define ___invoke_copy_in_kernel(to, from, n)				\
848 	__invoke_copy_from(__copy_user, to, from, n)
849 
850 #ifndef CONFIG_EVA
851 #define __invoke_copy_from_user(to, from, n)				\
852 	__invoke_copy_from(__copy_user, to, from, n)
853 
854 #define __invoke_copy_to_user(to, from, n)				\
855 	__invoke_copy_to(__copy_user, to, from, n)
856 
857 #define ___invoke_copy_in_user(to, from, n)				\
858 	__invoke_copy_from(__copy_user, to, from, n)
859 
860 #else
861 
862 /* EVA specific functions */
863 
864 extern size_t __copy_from_user_eva(void *__to, const void *__from,
865 				   size_t __n);
866 extern size_t __copy_to_user_eva(void *__to, const void *__from,
867 				 size_t __n);
868 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
869 
870 /*
871  * Source or destination address is in userland. We need to go through
872  * the TLB
873  */
874 #define __invoke_copy_from_user(to, from, n)				\
875 	__invoke_copy_from(__copy_from_user_eva, to, from, n)
876 
877 #define __invoke_copy_to_user(to, from, n)				\
878 	__invoke_copy_to(__copy_to_user_eva, to, from, n)
879 
880 #define ___invoke_copy_in_user(to, from, n)				\
881 	__invoke_copy_from(__copy_in_user_eva, to, from, n)
882 
883 #endif /* CONFIG_EVA */
884 
885 static inline unsigned long
886 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
887 {
888 	if (eva_kernel_access())
889 		return __invoke_copy_to_kernel(to, from, n);
890 	else
891 		return __invoke_copy_to_user(to, from, n);
892 }
893 
894 static inline unsigned long
895 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
896 {
897 	if (eva_kernel_access())
898 		return __invoke_copy_from_kernel(to, from, n);
899 	else
900 		return __invoke_copy_from_user(to, from, n);
901 }
902 
903 #define INLINE_COPY_FROM_USER
904 #define INLINE_COPY_TO_USER
905 
906 static inline unsigned long
907 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
908 {
909 	if (eva_kernel_access())
910 		return ___invoke_copy_in_kernel(to, from, n);
911 	else
912 		return ___invoke_copy_in_user(to, from, n);
913 }
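
/*
 * raw_copy_{to,from,in}_user() above are the arch backends for the generic
 * copy_to_user()/copy_from_user() helpers in <linux/uaccess.h>.  Illustrative
 * use of the generic wrappers, not part of this header ('ubuf' and 'count'
 * are hypothetical; a nonzero return means some bytes were not copied):
 *
 *	char kbuf[64];
 *
 *	if (count > sizeof(kbuf))
 *		count = sizeof(kbuf);
 *	if (copy_from_user(kbuf, ubuf, count))
 *		return -EFAULT;
 */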
914 
915 extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
916 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
917 
918 /*
919  * __clear_user: - Zero a block of memory in user space, with less checking.
920  * @to:	  Destination address, in user space.
921  * @n:	  Number of bytes to zero.
922  *
923  * Zero a block of memory in user space.  Caller must check
924  * the specified block with access_ok() before calling this function.
925  *
926  * Returns number of bytes that could not be cleared.
927  * On success, this will be zero.
928  */
929 static inline __kernel_size_t
930 __clear_user(void __user *addr, __kernel_size_t size)
931 {
932 	__kernel_size_t res;
933 
934 	if (eva_kernel_access()) {
935 		__asm__ __volatile__(
936 			"move\t$4, %1\n\t"
937 			"move\t$5, $0\n\t"
938 			"move\t$6, %2\n\t"
939 			__MODULE_JAL(__bzero_kernel)
940 			"move\t%0, $6"
941 			: "=r" (res)
942 			: "r" (addr), "r" (size)
943 			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
944 	} else {
945 		might_fault();
946 		__asm__ __volatile__(
947 			"move\t$4, %1\n\t"
948 			"move\t$5, $0\n\t"
949 			"move\t$6, %2\n\t"
950 			__MODULE_JAL(__bzero)
951 			"move\t%0, $6"
952 			: "=r" (res)
953 			: "r" (addr), "r" (size)
954 			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
955 	}
956 
957 	return res;
958 }
959 
960 #define clear_user(addr,n)						\
961 ({									\
962 	void __user * __cl_addr = (addr);				\
963 	unsigned long __cl_size = (n);					\
964 	if (__cl_size && access_ok(VERIFY_WRITE,			\
965 					__cl_addr, __cl_size))		\
966 		__cl_size = __clear_user(__cl_addr, __cl_size);		\
967 	__cl_size;							\
968 })
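
/*
 * Illustrative use, not part of this header ('ubuf', 'filled' and 'count'
 * are hypothetical): zero the unwritten tail of a user buffer, e.g. when
 * padding out a short read() result.
 *
 *	if (clear_user(ubuf + filled, count - filled))
 *		return -EFAULT;
 */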
969 
970 extern long __strncpy_from_kernel_nocheck_asm(char *__to, const char __user *__from, long __len);
971 extern long __strncpy_from_user_nocheck_asm(char *__to, const char __user *__from, long __len);
972 
973 /*
974  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
975  * @dst:   Destination address, in kernel space.  This buffer must be at
976  *	   least @count bytes long.
977  * @src:   Source address, in user space.
978  * @count: Maximum number of bytes to copy, including the trailing NUL.
979  *
980  * Copies a NUL-terminated string from userspace to kernel space.
981  * Caller must check the specified block with access_ok() before calling
982  * this function.
983  *
984  * On success, returns the length of the string (not including the trailing
985  * NUL).
986  *
987  * If access to userspace fails, returns -EFAULT (some data may have been
988  * copied).
989  *
990  * If @count is smaller than the length of the string, copies @count bytes
991  * and returns @count.
992  */
993 static inline long
994 __strncpy_from_user(char *__to, const char __user *__from, long __len)
995 {
996 	long res;
997 
998 	if (eva_kernel_access()) {
999 		__asm__ __volatile__(
1000 			"move\t$4, %1\n\t"
1001 			"move\t$5, %2\n\t"
1002 			"move\t$6, %3\n\t"
1003 			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1004 			"move\t%0, $2"
1005 			: "=r" (res)
1006 			: "r" (__to), "r" (__from), "r" (__len)
1007 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1008 	} else {
1009 		might_fault();
1010 		__asm__ __volatile__(
1011 			"move\t$4, %1\n\t"
1012 			"move\t$5, %2\n\t"
1013 			"move\t$6, %3\n\t"
1014 			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
1015 			"move\t%0, $2"
1016 			: "=r" (res)
1017 			: "r" (__to), "r" (__from), "r" (__len)
1018 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1019 	}
1020 
1021 	return res;
1022 }
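
/*
 * Illustrative use, not part of this header ('kname' and 'uname' are
 * hypothetical); the caller has already checked 'uname' with access_ok():
 *
 *	long len = __strncpy_from_user(kname, uname, sizeof(kname));
 *
 *	if (len < 0)
 *		return -EFAULT;
 */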
1023 
1024 extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
1025 extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
1026 
1027 /*
1028  * strncpy_from_user: - Copy a NUL terminated string from userspace.
1029  * @dst:   Destination address, in kernel space.  This buffer must be at
1030  *	   least @count bytes long.
1031  * @src:   Source address, in user space.
1032  * @count: Maximum number of bytes to copy, including the trailing NUL.
1033  *
1034  * Copies a NUL-terminated string from userspace to kernel space.
1035  *
1036  * On success, returns the length of the string (not including the trailing
1037  * NUL).
1038  *
1039  * If access to userspace fails, returns -EFAULT (some data may have been
1040  * copied).
1041  *
1042  * If @count is smaller than the length of the string, copies @count bytes
1043  * and returns @count.
1044  */
1045 static inline long
1046 strncpy_from_user(char *__to, const char __user *__from, long __len)
1047 {
1048 	long res;
1049 
1050 	if (eva_kernel_access()) {
1051 		__asm__ __volatile__(
1052 			"move\t$4, %1\n\t"
1053 			"move\t$5, %2\n\t"
1054 			"move\t$6, %3\n\t"
1055 			__MODULE_JAL(__strncpy_from_kernel_asm)
1056 			"move\t%0, $2"
1057 			: "=r" (res)
1058 			: "r" (__to), "r" (__from), "r" (__len)
1059 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1060 	} else {
1061 		might_fault();
1062 		__asm__ __volatile__(
1063 			"move\t$4, %1\n\t"
1064 			"move\t$5, %2\n\t"
1065 			"move\t$6, %3\n\t"
1066 			__MODULE_JAL(__strncpy_from_user_asm)
1067 			"move\t%0, $2"
1068 			: "=r" (res)
1069 			: "r" (__to), "r" (__from), "r" (__len)
1070 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1071 	}
1072 
1073 	return res;
1074 }
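
/*
 * Illustrative use, not part of this header ('uname' is hypothetical): copy
 * a user-supplied name into a fixed kernel buffer and reject names that do
 * not fit (a return equal to the buffer size means no NUL was copied).
 *
 *	char kname[32];
 *	long len = strncpy_from_user(kname, uname, sizeof(kname));
 *
 *	if (len < 0)
 *		return -EFAULT;
 *	if (len == sizeof(kname))
 *		return -ENAMETOOLONG;
 */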
1075 
1076 extern long __strlen_kernel_asm(const char __user *s);
1077 extern long __strlen_user_asm(const char __user *s);
1078 
1079 /*
1080  * strlen_user: - Get the size of a string in user space.
1081  * @str: The string to measure.
1082  *
1083  * Context: User context only. This function may sleep if pagefaults are
1084  *          enabled.
1085  *
1086  * Get the size of a NUL-terminated string in user space.
1087  *
1088  * Returns the size of the string INCLUDING the terminating NUL.
1089  * On exception, returns 0.
1090  *
1091  * If there is a limit on the length of a valid string, you may wish to
1092  * consider using strnlen_user() instead.
1093  */
1094 static inline long strlen_user(const char __user *s)
1095 {
1096 	long res;
1097 
1098 	if (eva_kernel_access()) {
1099 		__asm__ __volatile__(
1100 			"move\t$4, %1\n\t"
1101 			__MODULE_JAL(__strlen_kernel_asm)
1102 			"move\t%0, $2"
1103 			: "=r" (res)
1104 			: "r" (s)
1105 			: "$2", "$4", __UA_t0, "$31");
1106 	} else {
1107 		might_fault();
1108 		__asm__ __volatile__(
1109 			"move\t$4, %1\n\t"
1110 			__MODULE_JAL(__strlen_user_asm)
1111 			"move\t%0, $2"
1112 			: "=r" (res)
1113 			: "r" (s)
1114 			: "$2", "$4", __UA_t0, "$31");
1115 	}
1116 
1117 	return res;
1118 }
1119 
1120 extern long __strnlen_kernel_nocheck_asm(const char __user *s, long n);
1121 extern long __strnlen_user_nocheck_asm(const char __user *s, long n);
1122 
1123 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
1124 static inline long __strnlen_user(const char __user *s, long n)
1125 {
1126 	long res;
1127 
1128 	if (eva_kernel_access()) {
1129 		__asm__ __volatile__(
1130 			"move\t$4, %1\n\t"
1131 			"move\t$5, %2\n\t"
1132 			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
1133 			"move\t%0, $2"
1134 			: "=r" (res)
1135 			: "r" (s), "r" (n)
1136 			: "$2", "$4", "$5", __UA_t0, "$31");
1137 	} else {
1138 		might_fault();
1139 		__asm__ __volatile__(
1140 			"move\t$4, %1\n\t"
1141 			"move\t$5, %2\n\t"
1142 			__MODULE_JAL(__strnlen_user_nocheck_asm)
1143 			"move\t%0, $2"
1144 			: "=r" (res)
1145 			: "r" (s), "r" (n)
1146 			: "$2", "$4", "$5", __UA_t0, "$31");
1147 	}
1148 
1149 	return res;
1150 }
1151 
1152 extern long __strnlen_kernel_asm(const char __user *s, long n);
1153 extern long __strnlen_user_asm(const char __user *s, long n);
1154 
1155 /*
1156  * strnlen_user: - Get the size of a string in user space.
1157  * @str: The string to measure.
1158  *
1159  * Context: User context only. This function may sleep if pagefaults are
1160  *          enabled.
1161  *
1162  * Get the size of a NUL-terminated string in user space.
1163  *
1164  * Returns the size of the string INCLUDING the terminating NUL.
1165  * On exception, returns 0.
1166  * If the string is too long, returns a value greater than @n.
1167  */
1168 static inline long strnlen_user(const char __user *s, long n)
1169 {
1170 	long res;
1171 
1172 	might_fault();
1173 	if (eva_kernel_access()) {
1174 		__asm__ __volatile__(
1175 			"move\t$4, %1\n\t"
1176 			"move\t$5, %2\n\t"
1177 			__MODULE_JAL(__strnlen_kernel_asm)
1178 			"move\t%0, $2"
1179 			: "=r" (res)
1180 			: "r" (s), "r" (n)
1181 			: "$2", "$4", "$5", __UA_t0, "$31");
1182 	} else {
1183 		__asm__ __volatile__(
1184 			"move\t$4, %1\n\t"
1185 			"move\t$5, %2\n\t"
1186 			__MODULE_JAL(__strnlen_user_asm)
1187 			"move\t%0, $2"
1188 			: "=r" (res)
1189 			: "r" (s), "r" (n)
1190 			: "$2", "$4", "$5", __UA_t0, "$31");
1191 	}
1192 
1193 	return res;
1194 }
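
/*
 * Illustrative use, not part of this header ('ustr' is hypothetical): bound
 * the scan with strnlen_user() rather than strlen_user() so a missing NUL
 * cannot run past a sane limit.
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (!len)
 *		return -EFAULT;
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;
 */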
1195 
1196 #endif /* _ASM_UACCESS_H */
1197