xref: /openbmc/linux/arch/mips/include/asm/uaccess.h (revision 34737e26980341519d00e84711fe619f9f47e79c)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13 
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <asm/asm-eva.h>
17 #include <asm/extable.h>
18 
#ifdef CONFIG_32BIT

/*
 * A valid user address must have no bits in common with this mask,
 * i.e. it must lie below the 0x80000000 kernel segment boundary.
 */
#define __UA_LIMIT 0x80000000UL

#define __UA_ADDR	".word"		/* assembler directive for a pointer-sized cell */
#define __UA_LA		"la"		/* load-address instruction */
#define __UA_ADDU	"addu"		/* pointer-width add */
#define __UA_t0		"$8"		/* scratch registers clobbered by the */
#define __UA_t1		"$9"		/* out-of-line uaccess helpers */

#endif /* CONFIG_32BIT */
30 
#ifdef CONFIG_64BIT

/*
 * On 64-bit kernels the user-space limit depends on the run-time
 * address-space configuration, so it lives in a variable (defined
 * elsewhere) rather than a compile-time constant.
 */
extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit

#define __UA_ADDR	".dword"	/* assembler directive for a pointer-sized cell */
#define __UA_LA		"dla"		/* 64-bit load-address instruction */
#define __UA_ADDU	"daddu"		/* 64-bit add */
#define __UA_t0		"$12"		/* scratch registers clobbered by the */
#define __UA_t1		"$13"		/* out-of-line uaccess helpers */

#endif /* CONFIG_64BIT */
44 
/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok,
 * so this folds to 0 and only the address bits are tested.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
60 
61 /*
62  * access_ok: - Checks if a user space pointer is valid
63  * @addr: User space pointer to start of block to check
64  * @size: Size of block to check
65  *
66  * Context: User context only. This function may sleep if pagefaults are
67  *          enabled.
68  *
69  * Checks if a pointer to a block of memory in user space is valid.
70  *
71  * Returns true (nonzero) if the memory block may be valid, false (zero)
72  * if it is definitely invalid.
73  *
74  * Note that, depending on architecture, this function probably just
75  * checks that the pointer is in the user space range - after calling
76  * this function, memory access functions may still return -EFAULT.
77  */
78 
79 static inline int __access_ok(const void __user *p, unsigned long size)
80 {
81 	unsigned long addr = (unsigned long)p;
82 	unsigned long end = addr + size - !!size;
83 
84 	return (__UA_LIMIT & (addr | end | __ua_size(size))) == 0;
85 }
86 
/*
 * Non-zero when the user block [addr, addr + size) may be accessed.
 * Wrapped in likely() since callers normally pass valid pointers.
 */
#define access_ok(addr, size)					\
	likely(__access_ok((addr), (size)))
89 
90 /*
91  * put_user: - Write a simple value into user space.
92  * @x:	 Value to copy to user space.
93  * @ptr: Destination address, in user space.
94  *
95  * Context: User context only. This function may sleep if pagefaults are
96  *          enabled.
97  *
98  * This macro copies a single simple value from kernel space to user
99  * space.  It supports simple types like char and int, but not larger
100  * data types like structures or arrays.
101  *
102  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
103  * to the result of dereferencing @ptr.
104  *
105  * Returns zero on success, or -EFAULT on error.
106  */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	int __pu_ret = -EFAULT;						\
									\
	might_fault();							\
	/* Only touch user memory after the range check passes. */	\
	if (access_ok(__p, sizeof(*__p)))				\
		__pu_ret = __put_user((x), __p);			\
	__pu_ret;							\
})
114 
115 /*
116  * get_user: - Get a simple variable from user space.
117  * @x:	 Variable to store result.
118  * @ptr: Source address, in user space.
119  *
120  * Context: User context only. This function may sleep if pagefaults are
121  *          enabled.
122  *
123  * This macro copies a single simple variable from user space to kernel
124  * space.  It supports simple types like char and int, but not larger
125  * data types like structures or arrays.
126  *
127  * @ptr must have pointer-to-simple-variable type, and the result of
128  * dereferencing @ptr must be assignable to @x without a cast.
129  *
130  * Returns zero on success, or -EFAULT on error.
131  * On error, the variable @x is set to zero.
132  */
#define get_user(x, ptr)						\
({									\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
	int __gu_ret = -EFAULT;						\
									\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p)))				\
		__gu_ret = __get_user((x), __p);			\
	else								\
		(x) = 0;	/* documented: zero @x on error */	\
	__gu_ret;							\
})
141 
142 /*
143  * __put_user: - Write a simple value into user space, with less checking.
144  * @x:	 Value to copy to user space.
145  * @ptr: Destination address, in user space.
146  *
147  * Context: User context only. This function may sleep if pagefaults are
148  *          enabled.
149  *
150  * This macro copies a single simple value from kernel space to user
151  * space.  It supports simple types like char and int, but not larger
152  * data types like structures or arrays.
153  *
154  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
155  * to the result of dereferencing @ptr.
156  *
157  * Caller must check the pointer with access_ok() before calling this
158  * function.
159  *
160  * Returns zero on success, or -EFAULT on error.
161  */
#define __put_user(x, ptr)						\
({									\
	/* Evaluate @ptr and @x exactly once each. */			\
	__typeof__(*(ptr)) __user *__pu_ptr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = 0;						\
									\
	__chk_user_ptr(__pu_ptr);					\
	/* Dispatch on access size; on a fault the asm fixup sets	\
	 * __pu_err to -EFAULT via the exception table. */		\
	switch (sizeof(*__pu_ptr)) {					\
	case 1:								\
		__put_data_asm(user_sb, __pu_ptr);			\
		break;							\
	case 2:								\
		__put_data_asm(user_sh, __pu_ptr);			\
		break;							\
	case 4:								\
		__put_data_asm(user_sw, __pu_ptr);			\
		break;							\
	case 8:								\
		/* One sd on 64-bit, a pair of sw on 32-bit. */		\
		__PUT_DW(user_sd, __pu_ptr);				\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__pu_err;							\
})
188 
189 /*
190  * __get_user: - Get a simple variable from user space, with less checking.
191  * @x:	 Variable to store result.
192  * @ptr: Source address, in user space.
193  *
194  * Context: User context only. This function may sleep if pagefaults are
195  *          enabled.
196  *
197  * This macro copies a single simple variable from user space to kernel
198  * space.  It supports simple types like char and int, but not larger
199  * data types like structures or arrays.
200  *
201  * @ptr must have pointer-to-simple-variable type, and the result of
202  * dereferencing @ptr must be assignable to @x without a cast.
203  *
204  * Caller must check the pointer with access_ok() before calling this
205  * function.
206  *
207  * Returns zero on success, or -EFAULT on error.
208  * On error, the variable @x is set to zero.
209  */
#define __get_user(x, ptr)						\
({									\
	/* Evaluate @ptr exactly once. */				\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	int __gu_err = 0;						\
									\
	__chk_user_ptr(__gu_ptr);					\
	/* Dispatch on access size; on a fault the asm fixup zeroes	\
	 * the destination and sets __gu_err to -EFAULT. */		\
	switch (sizeof(*__gu_ptr)) {					\
	case 1:								\
		__get_data_asm((x), user_lb, __gu_ptr);			\
		break;							\
	case 2:								\
		__get_data_asm((x), user_lh, __gu_ptr);			\
		break;							\
	case 4:								\
		__get_data_asm((x), user_lw, __gu_ptr);			\
		break;							\
	case 8:								\
		/* One ld on 64-bit, a pair of lw on 32-bit. */		\
		__GET_DW((x), user_ld, __gu_ptr);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__gu_err;							\
})
235 
/*
 * Dummy type used so that an "o" (offsettable memory) asm constraint
 * can describe the user access: __m() reinterprets an address as a
 * large structure, giving the compiler a memory operand to track
 * without any 100-word object actually being read or written.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * A 64-bit load is a single instruction on 64-bit kernels but needs a
 * pair of 32-bit loads on 32-bit kernels.
 */
#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif
245 
/*
 * Load a single 1/2/4/8-byte datum from @addr with access instruction
 * @insn.  The access at label 1 has an __ex_table entry pointing at
 * fixup label 3: on a fault, %0 (__gu_err) is set to -EFAULT and the
 * destination register is zeroed before resuming at label 2.
 * Expects an 'int __gu_err' to be in scope at the expansion site.
 */
#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
267 
/*
 * Get a long long 64 using 32 bit registers: one half of the pair via
 * %1 from (addr), the other via the paired register %D1 from 4(addr).
 * Either access may fault; both fixup entries zero the whole register
 * pair and set __gu_err to -EFAULT.  The union reinterprets the raw
 * 64 bits as the destination type.  Expects an 'int __gu_err' to be
 * in scope at the expansion site.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"				\n"	\
	"2:	" insn("%D1", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}
298 
/*
 * Non-faulting kernel-space read: copy sizeof(type) bytes from @src
 * to @dst using the kernel_* load instructions, branching to
 * @err_label if the access faults.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gu_err;	/* written by __get_data_asm()/__GET_DW() */	\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__get_data_asm(*(type *)(dst), kernel_lb,		\
			       (__force type *)(src));			\
		break;							\
	case 2:								\
		__get_data_asm(*(type *)(dst), kernel_lh,		\
			       (__force type *)(src));			\
		break;							\
	case 4:								\
		 __get_data_asm(*(type *)(dst), kernel_lw,		\
			       (__force type *)(src));			\
		break;							\
	case 8:								\
		__GET_DW(*(type *)(dst), kernel_ld,			\
			 (__force type *)(src));			\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	if (unlikely(__gu_err))						\
		goto err_label;						\
} while (0)
327 
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron: a 64-bit store is a single
 * instruction on 64-bit kernels but a pair of 32-bit stores on
 * 32-bit ones.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif
338 
/*
 * Store __pu_val to @ptr with access instruction @insn.  Operand %z2
 * uses the 'z' print modifier so a constant zero is stored straight
 * from register $0.  A fault at label 1 diverts to fixup label 3,
 * which sets __pu_err (%0) to -EFAULT and resumes at label 2.
 * Expects '__pu_val' and 'int __pu_err' in scope at the expansion
 * site.
 */
#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}
356 
/*
 * Store a 64-bit __pu_val using two 32-bit stores: one half via %2 to
 * (ptr), the other via the paired register %D2 to 4(ptr).  Either
 * store may fault; both fixup entries set __pu_err to -EFAULT.
 * Expects '__pu_val' and 'int __pu_err' in scope at the expansion
 * site.
 */
#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}
376 
/*
 * Non-faulting kernel-space write: store sizeof(type) bytes read from
 * @src to @dst using the kernel_* store instructions, branching to
 * @err_label if the store faults.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type __pu_val;							\
	int __pu_err = 0;						\
									\
	__pu_val = *(__force type *)(src);				\
	switch (sizeof(type)) {						\
	case 1:								\
		__put_data_asm(kernel_sb, (type *)(dst));		\
		break;							\
	case 2:								\
		__put_data_asm(kernel_sh, (type *)(dst));		\
		break;							\
	case 4:								\
		/* Terminator added: the bare invocation only built	\
		 * because __put_data_asm expands to a brace block. */	\
		__put_data_asm(kernel_sw, (type *)(dst));		\
		break;							\
	case 8:								\
		__PUT_DW(kernel_sd, (type *)(dst));			\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	if (unlikely(__pu_err))						\
		goto err_label;						\
} while (0)
403 
404 
/*
 * We're generating jump to subroutines which will be outside the range
 * of jump instructions: modules may load far from the core-kernel
 * helpers, so build the target address in $1 (at) and use jalr;
 * built-in code can use a plain jal.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

/*
 * Extra register added to the clobber lists of the copy routines.
 * NOTE(review): presumably the DADDI-workaround and EVA-prefetch code
 * paths use $3 as scratch -- verify against the asm implementations.
 * Otherwise name $0, which is never written, as a harmless placeholder.
 */
#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif
426 
427 extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
428 extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);
429 
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/*
	 * The out-of-line helper takes its arguments in the MIPS
	 * argument registers, so pin the C values to $4-$6 (a0-a2).
	 */
	register void *__cu_to_r __asm__("$4");
	register const void __user *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = to;
	__cu_from_r = from;
	__cu_len_r = n;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		__MODULE_JAL(__raw_copy_from_user)
		/*
		 * Leave $1 = from + len after the call.  NOTE(review):
		 * presumably consumed by the helper's fault fixup to
		 * compute the residue -- confirm against the asm.
		 */
		".set\tnoat\n\t"
		__UA_ADDU "\t$1, %1, %2\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		/* Registers the helper may clobber, plus memory. */
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	/* $6 comes back holding the number of bytes left uncopied. */
	return __cu_len_r;
}
455 
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * The out-of-line helper takes its arguments in the MIPS
	 * argument registers, so pin the C values to $4-$6 (a0-a2).
	 */
	register void __user *__cu_to_r __asm__("$4");
	register const void *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = (to);
	__cu_from_r = (from);
	__cu_len_r = (n);

	__asm__ __volatile__(
		__MODULE_JAL(__raw_copy_to_user)
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		/* Registers the helper may clobber, plus memory. */
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	/* $6 comes back holding the number of bytes left uncopied. */
	return __cu_len_r;
}
476 
477 #define INLINE_COPY_FROM_USER
478 #define INLINE_COPY_TO_USER
479 
480 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
481 
482 /*
483  * __clear_user: - Zero a block of memory in user space, with less checking.
484  * @to:	  Destination address, in user space.
485  * @n:	  Number of bytes to zero.
486  *
487  * Zero a block of memory in user space.  Caller must check
488  * the specified block with access_ok() before calling this function.
489  *
490  * Returns number of bytes that could not be cleared.
491  * On success, this will be zero.
492  */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

	might_fault();
	/*
	 * Call __bzero with $4 = addr, $5 = 0, $6 = size; the residue
	 * (bytes not cleared) comes back in $6.  NOTE(review): this
	 * register-level interface differs from the two-argument C
	 * prototype above -- verify against the asm implementation.
	 */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: bzero_clobbers);

	return res;
}
518 
/* Range-checked clear; returns the number of bytes NOT cleared. */
#define clear_user(addr,n)						\
({									\
	void __user *__cl_ptr = (addr);					\
	unsigned long __cl_len = (n);					\
									\
	if (__cl_len && access_ok(__cl_ptr, __cl_len))			\
		__cl_len = __clear_user(__cl_ptr, __cl_len);		\
	__cl_len;							\
})
527 
528 extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
529 
530 /*
531  * strncpy_from_user: - Copy a NUL terminated string from userspace.
532  * @dst:   Destination address, in kernel space.  This buffer must be at
533  *	   least @count bytes long.
534  * @src:   Source address, in user space.
535  * @count: Maximum number of bytes to copy, including the trailing NUL.
536  *
537  * Copies a NUL-terminated string from userspace to kernel space.
538  *
539  * On success, returns the length of the string (not including the trailing
540  * NUL).
541  *
542  * If access to userspace fails, returns -EFAULT (some data may have been
543  * copied).
544  *
545  * If @count is smaller than the length of the string, copies @count bytes
546  * and returns @count.
547  */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	/* Bail out early if the user range is not accessible. */
	if (!access_ok(__from, __len))
		return -EFAULT;

	might_fault();
	/* Arguments go in $4-$6; the result comes back in $2 (v0). */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
569 
570 extern long __strnlen_user_asm(const char __user *s, long n);
571 
572 /*
573  * strnlen_user: - Get the size of a string in user space.
574  * @str: The string to measure.
575  *
576  * Context: User context only. This function may sleep if pagefaults are
577  *          enabled.
578  *
579  * Get the size of a NUL-terminated string in user space.
580  *
581  * Returns the size of the string INCLUDING the terminating NUL.
582  * On exception, returns 0.
583  * If the string is too long, returns a value greater than @n.
584  */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	/* Documented contract: return 0 if the string is inaccessible. */
	if (!access_ok(s, 1))
		return 0;

	might_fault();
	/* Arguments go in $4/$5; the result comes back in $2 (v0). */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
604 
605 #endif /* _ASM_UACCESS_H */
606