xref: /openbmc/linux/arch/mips/include/asm/uaccess.h (revision 15f3d81a8c8a564cbc8642cf95c548d02db035a7)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13 
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <asm/asm-eva.h>
17 #include <asm/extable.h>
18 
#ifdef CONFIG_32BIT

/* 32-bit kernels: user space is the low 2 GiB. */
#define __UA_LIMIT 0x80000000UL
#define TASK_SIZE_MAX	KSEG0

/* Assembler idioms sized for the native 32-bit register width. */
#define __UA_ADDR	".word"	/* emit a pointer-sized constant */
#define __UA_LA		"la"	/* load an address into a register */
#define __UA_ADDU	"addu"	/* pointer-sized add */
#define __UA_t0		"$8"	/* scratch regs clobbered by the asm helpers */
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

/* Determined at runtime from the CPU's implemented virtual address bits. */
extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit
#define TASK_SIZE_MAX	XKSSEG

/* 64-bit counterparts of the assembler idioms above. */
#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */
46 
47 /*
48  * access_ok: - Checks if a user space pointer is valid
49  * @addr: User space pointer to start of block to check
50  * @size: Size of block to check
51  *
52  * Context: User context only. This function may sleep if pagefaults are
53  *          enabled.
54  *
55  * Checks if a pointer to a block of memory in user space is valid.
56  *
57  * Returns true (nonzero) if the memory block may be valid, false (zero)
58  * if it is definitely invalid.
59  *
60  * Note that, depending on architecture, this function probably just
61  * checks that the pointer is in the user space range - after calling
62  * this function, memory access functions may still return -EFAULT.
63  */
64 
65 static inline int __access_ok(const void __user *p, unsigned long size)
66 {
67 	unsigned long addr = (unsigned long)p;
68 	unsigned long limit = TASK_SIZE_MAX;
69 
70 	return (size <= limit) && (addr <= (limit - size));
71 }
72 
73 #define access_ok(addr, size)					\
74 	likely(__access_ok((addr), (size)))
75 
76 /*
77  * put_user: - Write a simple value into user space.
78  * @x:	 Value to copy to user space.
79  * @ptr: Destination address, in user space.
80  *
81  * Context: User context only. This function may sleep if pagefaults are
82  *          enabled.
83  *
84  * This macro copies a single simple value from kernel space to user
85  * space.  It supports simple types like char and int, but not larger
86  * data types like structures or arrays.
87  *
88  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
89  * to the result of dereferencing @ptr.
90  *
91  * Returns zero on success, or -EFAULT on error.
92  */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
									\
	might_fault();							\
	/* Range-check first, then drop to the unchecked store. */	\
	access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT;	\
})
100 
101 /*
102  * get_user: - Get a simple variable from user space.
103  * @x:	 Variable to store result.
104  * @ptr: Source address, in user space.
105  *
106  * Context: User context only. This function may sleep if pagefaults are
107  *          enabled.
108  *
109  * This macro copies a single simple variable from user space to kernel
110  * space.  It supports simple types like char and int, but not larger
111  * data types like structures or arrays.
112  *
113  * @ptr must have pointer-to-simple-variable type, and the result of
114  * dereferencing @ptr must be assignable to @x without a cast.
115  *
116  * Returns zero on success, or -EFAULT on error.
117  * On error, the variable @x is set to zero.
118  */
#define get_user(x, ptr)						\
({									\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
									\
	might_fault();							\
	/* On a failed range check, (x) is zeroed per the contract. */	\
	access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) :		\
				       ((x) = 0, -EFAULT);		\
})
127 
128 /*
129  * __put_user: - Write a simple value into user space, with less checking.
130  * @x:	 Value to copy to user space.
131  * @ptr: Destination address, in user space.
132  *
133  * Context: User context only. This function may sleep if pagefaults are
134  *          enabled.
135  *
136  * This macro copies a single simple value from kernel space to user
137  * space.  It supports simple types like char and int, but not larger
138  * data types like structures or arrays.
139  *
140  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
141  * to the result of dereferencing @ptr.
142  *
143  * Caller must check the pointer with access_ok() before calling this
144  * function.
145  *
146  * Returns zero on success, or -EFAULT on error.
147  */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__pu_ptr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = 0;						\
									\
	/* __put_data_asm()/__PUT_DW() expand below and reference the	\
	 * locals __pu_val and __pu_err by name -- do not rename them.	\
	 */								\
	__chk_user_ptr(__pu_ptr);					\
	switch (sizeof(*__pu_ptr)) {					\
	case 1:								\
		__put_data_asm(user_sb, __pu_ptr);			\
		break;							\
	case 2:								\
		__put_data_asm(user_sh, __pu_ptr);			\
		break;							\
	case 4:								\
		__put_data_asm(user_sw, __pu_ptr);			\
		break;							\
	case 8:								\
		__PUT_DW(user_sd, __pu_ptr);				\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__pu_err;							\
})
174 
175 /*
176  * __get_user: - Get a simple variable from user space, with less checking.
177  * @x:	 Variable to store result.
178  * @ptr: Source address, in user space.
179  *
180  * Context: User context only. This function may sleep if pagefaults are
181  *          enabled.
182  *
183  * This macro copies a single simple variable from user space to kernel
184  * space.  It supports simple types like char and int, but not larger
185  * data types like structures or arrays.
186  *
187  * @ptr must have pointer-to-simple-variable type, and the result of
188  * dereferencing @ptr must be assignable to @x without a cast.
189  *
190  * Caller must check the pointer with access_ok() before calling this
191  * function.
192  *
193  * Returns zero on success, or -EFAULT on error.
194  * On error, the variable @x is set to zero.
195  */
#define __get_user(x, ptr)						\
({									\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	int __gu_err = 0;						\
									\
	/* __get_data_asm()/__GET_DW() expand below and write the	\
	 * local __gu_err by name -- do not rename it.			\
	 */								\
	__chk_user_ptr(__gu_ptr);					\
	switch (sizeof(*__gu_ptr)) {					\
	case 1:								\
		__get_data_asm((x), user_lb, __gu_ptr);			\
		break;							\
	case 2:								\
		__get_data_asm((x), user_lh, __gu_ptr);			\
		break;							\
	case 4:								\
		__get_data_asm((x), user_lw, __gu_ptr);			\
		break;							\
	case 8:								\
		__GET_DW((x), user_ld, __gu_ptr);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__gu_err;							\
})
221 
/*
 * Dummy type so an "o" (offsettable memory) asm constraint can describe
 * a generously sized region at the user pointer; see __m() users below.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * 64-bit load: one native access on 64-bit kernels, a pair of 32-bit
 * accesses on 32-bit kernels.
 */
#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif
231 
/*
 * Load a single value with an exception fixup: label 1 is the access
 * that may fault; the .fixup stub at 3 sets the error output (__gu_err,
 * from the enclosing macro's scope) to -EFAULT, zeroes the destination
 * and resumes at 2.  The __ex_table entry maps faulting PC 1b -> 3b.
 */
#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
253 
254 /*
255  * Get a long long 64 using 32 bit registers.
256  */
/*
 * 64-bit load on a 32-bit kernel: two 32-bit accesses at (addr) and
 * 4(addr) into a register pair (%1 and its sibling %D1).  Either access
 * may fault; both __ex_table entries point at stub 4, which reports
 * -EFAULT and zeroes both halves.  The union reassembles the halves
 * into the destination's real type without pointer punning.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"				\n"	\
	"2:	" insn("%D1", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}
284 
/*
 * Read a kernel-space value without any access_ok() checking, relying
 * only on the exception-table fixup; on a fault, branch to @err_label.
 *
 * __gu_err is initialized to 0 for consistency with
 * __put_kernel_nofault() below (the asm helpers also set it through
 * their "0" (0) matching constraint).
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gu_err = 0;						\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__get_data_asm(*(type *)(dst), kernel_lb,		\
			       (__force type *)(src));			\
		break;							\
	case 2:								\
		__get_data_asm(*(type *)(dst), kernel_lh,		\
			       (__force type *)(src));			\
		break;							\
	case 4:								\
		__get_data_asm(*(type *)(dst), kernel_lw,		\
			       (__force type *)(src));			\
		break;							\
	case 8:								\
		__GET_DW(*(type *)(dst), kernel_ld,			\
			 (__force type *)(src));			\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	if (unlikely(__gu_err))						\
		goto err_label;						\
} while (0)
313 
314 /*
315  * Yuck.  We need two variants, one for 64bit operation and one
316  * for 32 bit mode and old iron.
317  */
/* 64-bit store: native sd on 64-bit kernels, two 32-bit stores on 32-bit. */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif
324 
/*
 * Store __pu_val (from the enclosing macro's scope) with a fixup: a
 * fault at label 1 lands in stub 3, which sets __pu_err to -EFAULT and
 * resumes at 2.  The "Jr" constraint plus %z2 let a constant zero be
 * stored directly from register $0.
 */
#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}
342 
/*
 * 64-bit store on a 32-bit kernel: the register pair %2/%D2 is written
 * out as two 32-bit stores at (ptr) and 4(ptr).  Either store may
 * fault; both __ex_table entries map to stub 4, which sets __pu_err
 * (from the enclosing macro's scope) to -EFAULT.
 */
#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}
362 
/*
 * Write a kernel-space value without any access_ok() checking, relying
 * only on the exception-table fixup; on a fault, branch to @err_label.
 *
 * Fixed: the case-4 call was missing its terminating semicolon (it only
 * compiled because __put_data_asm() expands to a braced block), and the
 * continuation backslash on the __pu_val declaration was mis-aligned.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type __pu_val;							\
	int __pu_err = 0;						\
									\
	__pu_val = *(__force type *)(src);				\
	switch (sizeof(type)) {						\
	case 1:								\
		__put_data_asm(kernel_sb, (type *)(dst));		\
		break;							\
	case 2:								\
		__put_data_asm(kernel_sh, (type *)(dst));		\
		break;							\
	case 4:								\
		__put_data_asm(kernel_sw, (type *)(dst));		\
		break;							\
	case 8:								\
		__PUT_DW(kernel_sd, (type *)(dst));			\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	if (unlikely(__pu_err))						\
		goto err_label;						\
} while (0)
389 
390 
391 /*
392  * We're generating jump to subroutines which will be outside the range of
393  * jump instructions
394  */
#ifdef MODULE
/*
 * Modules can be loaded outside the reach of a direct jal (see the
 * comment above), so build the call through register $1 (at) instead.
 */
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

/*
 * Extra scratch register the copy routines may clobber; only a real
 * register for DADDI-workaround or EVA+prefetch builds, otherwise the
 * harmless $0.
 */
#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif
412 
/* Assembler copy routines; argument/result passing is via $4-$6 below. */
extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);

/*
 * Copy @n bytes from user space to kernel space.  The arguments are
 * pinned to the MIPS argument registers $4-$6 that the assembler
 * routine expects; $6 comes back holding the residual count, which is
 * what we return (0 on full success).
 */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	register void *__cu_to_r __asm__("$4");
	register const void __user *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = to;
	__cu_from_r = from;
	__cu_len_r = n;

	/*
	 * NOTE(review): with noreorder in effect the __UA_ADDU below sits
	 * in the call's delay slot, apparently handing $1 = from + n to
	 * the copy routine -- confirm against the lib/memcpy.S fault
	 * handling before relying on this.
	 */
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		__MODULE_JAL(__raw_copy_from_user)
		".set\tnoat\n\t"
		__UA_ADDU "\t$1, %1, %2\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	return __cu_len_r;
}
441 
/*
 * Copy @n bytes from kernel space to user space.  Mirror image of
 * raw_copy_from_user(): arguments pinned to $4-$6, residual count
 * returned in $6 (0 on full success).
 */
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	register void __user *__cu_to_r __asm__("$4");
	register const void *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = (to);
	__cu_from_r = (from);
	__cu_len_r = (n);

	__asm__ __volatile__(
		__MODULE_JAL(__raw_copy_to_user)
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	return __cu_len_r;
}
462 
/* Tell the generic uaccess code the raw copies above are inline-worthy. */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

/* Assembler zeroing helper used by __clear_user() below. */
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
467 
468 /*
469  * __clear_user: - Zero a block of memory in user space, with less checking.
470  * @to:	  Destination address, in user space.
471  * @n:	  Number of bytes to zero.
472  *
473  * Zero a block of memory in user space.  Caller must check
474  * the specified block with access_ok() before calling this function.
475  *
476  * Returns number of bytes that could not be cleared.
477  * On success, this will be zero.
478  */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

	might_fault();
	/*
	 * Call __bzero with $4 = addr, $5 = 0 (fill value for the
	 * memset-style helper), $6 = size; the residual byte count
	 * comes back in $6.
	 */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: bzero_clobbers);

	return res;
}
504 
/*
 * Checked variant of __clear_user(): zeroes only when the range passes
 * access_ok(); on a failed check the untouched length is returned.
 */
#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
513 
514 extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
515 
516 /*
517  * strncpy_from_user: - Copy a NUL terminated string from userspace.
518  * @dst:   Destination address, in kernel space.  This buffer must be at
519  *	   least @count bytes long.
520  * @src:   Source address, in user space.
521  * @count: Maximum number of bytes to copy, including the trailing NUL.
522  *
523  * Copies a NUL-terminated string from userspace to kernel space.
524  *
525  * On success, returns the length of the string (not including the trailing
526  * NUL).
527  *
528  * If access to userspace fails, returns -EFAULT (some data may have been
529  * copied).
530  *
531  * If @count is smaller than the length of the string, copies @count bytes
532  * and returns @count.
533  */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (!access_ok(__from, __len))
		return -EFAULT;

	might_fault();
	/*
	 * Call the assembler helper with $4 = to, $5 = from, $6 = len;
	 * the result (copied length or -EFAULT) comes back in $2 (v0).
	 */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
555 
556 extern long __strnlen_user_asm(const char __user *s, long n);
557 
558 /*
559  * strnlen_user: - Get the size of a string in user space.
560  * @str: The string to measure.
561  *
562  * Context: User context only. This function may sleep if pagefaults are
563  *          enabled.
564  *
565  * Get the size of a NUL-terminated string in user space.
566  *
567  * Returns the size of the string INCLUDING the terminating NUL.
568  * On exception, returns 0.
569  * If the string is too long, returns a value greater than @n.
570  */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	/*
	 * Only the first byte is range-checked here; the assembler
	 * helper's own fault handling covers the rest of the walk.
	 * Returns 0 when even the start is not user-accessible.
	 */
	if (!access_ok(s, 1))
		return 0;

	might_fault();
	/* $4 = s, $5 = n; the length comes back in $2 (v0). */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
590 
591 #endif /* _ASM_UACCESS_H */
592