xref: /openbmc/linux/arch/sparc/include/asm/uaccess_32.h (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2a439fe51SSam Ravnborg /*
3a439fe51SSam Ravnborg  * uaccess.h: User space memory access functions.
4a439fe51SSam Ravnborg  *
5a439fe51SSam Ravnborg  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6a439fe51SSam Ravnborg  * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7a439fe51SSam Ravnborg  */
8a439fe51SSam Ravnborg #ifndef _ASM_UACCESS_H
9a439fe51SSam Ravnborg #define _ASM_UACCESS_H
10a439fe51SSam Ravnborg 
11a439fe51SSam Ravnborg #include <linux/compiler.h>
12a439fe51SSam Ravnborg #include <linux/string.h>
13a439fe51SSam Ravnborg 
142c66f623SDavid Miller #include <asm/processor.h>
15*12700c17SArnd Bergmann #include <asm-generic/access_ok.h>
16a439fe51SSam Ravnborg 
17a439fe51SSam Ravnborg /* Uh, these should become the main single-value transfer routines..
18a439fe51SSam Ravnborg  * They automatically use the right size if we just have the right
19a439fe51SSam Ravnborg  * pointer type..
20a439fe51SSam Ravnborg  *
21a439fe51SSam Ravnborg  * This gets kind of ugly. We want to return _two_ values in "get_user()"
22a439fe51SSam Ravnborg  * and yet we don't want to do any pointers, because that is too much
23a439fe51SSam Ravnborg  * of a performance impact. Thus we have a few rather ugly macros here,
24a439fe51SSam Ravnborg  * and hide all the ugliness from the user.
25a439fe51SSam Ravnborg  */
/* Store value @x at user address @ptr.  Evaluates to 0 on success,
 * -EFAULT when the __access_ok() range check fails, or the result of
 * __put_user_bad() for an unsupported access size. */
26a439fe51SSam Ravnborg #define put_user(x, ptr) ({ \
27*12700c17SArnd Bergmann 	void __user *__pu_addr = (ptr); \
28a439fe51SSam Ravnborg 	__chk_user_ptr(ptr); \
298ccf7b25SMichael S. Tsirkin 	__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
308ccf7b25SMichael S. Tsirkin })
31a439fe51SSam Ravnborg 
/* Fetch a value of type *@ptr from user address @ptr into @x.
 * Evaluates to 0 on success or -EFAULT on failure; @x is zeroed on any
 * failure path (see __get_user_check()). */
32a439fe51SSam Ravnborg #define get_user(x, ptr) ({ \
33*12700c17SArnd Bergmann 	const void __user *__gu_addr = (ptr); \
34a439fe51SSam Ravnborg 	__chk_user_ptr(ptr); \
358ccf7b25SMichael S. Tsirkin 	__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
368ccf7b25SMichael S. Tsirkin })
37a439fe51SSam Ravnborg 
38a439fe51SSam Ravnborg /*
39a439fe51SSam Ravnborg  * The "__xxx" versions do not do address space checking, useful when
40a439fe51SSam Ravnborg  * doing multiple accesses to the same area (the user has to do the
41a439fe51SSam Ravnborg  * checks by hand with "access_ok()")
42a439fe51SSam Ravnborg  */
/* Unchecked single-value transfers: identical to put_user()/get_user()
 * above, but skip the __access_ok() range check (see comment above). */
438ccf7b25SMichael S. Tsirkin #define __put_user(x, ptr) \
448ccf7b25SMichael S. Tsirkin 	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
458ccf7b25SMichael S. Tsirkin #define __get_user(x, ptr) \
468ccf7b25SMichael S. Tsirkin     __get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
47a439fe51SSam Ravnborg 
/* __m(x) casts a user address to a pointer to this dummy struct so the
 * "m" (*__m(addr)) operand in the asm below names a memory access at
 * that address.  NOTE(review): the 100-word size presumably keeps the
 * compiler from assuming only a small object is touched -- confirm
 * against other arch uaccess implementations. */
48a439fe51SSam Ravnborg struct __large_struct { unsigned long buf[100]; };
49a439fe51SSam Ravnborg #define __m(x) ((struct __large_struct __user *)(x))
50a439fe51SSam Ravnborg 
/*
 * Range-checked store: validates [addr, addr + size) with
 * __access_ok(), then dispatches on @size to the matching store
 * instruction suffix (b/h/<word>/d -- see __put_user_asm()).
 * Evaluates to 0 on success, -EFAULT when the range check fails, or
 * __put_user_bad()'s result for an unsupported size.
 */
51a439fe51SSam Ravnborg #define __put_user_check(x, addr, size) ({ \
52a439fe51SSam Ravnborg 	register int __pu_ret; \
53a439fe51SSam Ravnborg 	if (__access_ok(addr, size)) { \
54a439fe51SSam Ravnborg 		switch (size) { \
558ccf7b25SMichael S. Tsirkin 		case 1: \
568ccf7b25SMichael S. Tsirkin 			__put_user_asm(x, b, addr, __pu_ret); \
578ccf7b25SMichael S. Tsirkin 			break; \
588ccf7b25SMichael S. Tsirkin 		case 2: \
598ccf7b25SMichael S. Tsirkin 			__put_user_asm(x, h, addr, __pu_ret); \
608ccf7b25SMichael S. Tsirkin 			break; \
618ccf7b25SMichael S. Tsirkin 		case 4: \
628ccf7b25SMichael S. Tsirkin 			__put_user_asm(x, , addr, __pu_ret); \
638ccf7b25SMichael S. Tsirkin 			break; \
648ccf7b25SMichael S. Tsirkin 		case 8: \
658ccf7b25SMichael S. Tsirkin 			__put_user_asm(x, d, addr, __pu_ret); \
668ccf7b25SMichael S. Tsirkin 			break; \
678ccf7b25SMichael S. Tsirkin 		default: \
688ccf7b25SMichael S. Tsirkin 			__pu_ret = __put_user_bad(); \
698ccf7b25SMichael S. Tsirkin 			break; \
708ccf7b25SMichael S. Tsirkin 		} \
718ccf7b25SMichael S. Tsirkin 	} else { \
728ccf7b25SMichael S. Tsirkin 		__pu_ret = -EFAULT; \
738ccf7b25SMichael S. Tsirkin 	} \
748ccf7b25SMichael S. Tsirkin 	__pu_ret; \
758ccf7b25SMichael S. Tsirkin })
76a439fe51SSam Ravnborg 
/*
 * Unchecked store: same size dispatch as __put_user_check() but no
 * __access_ok() validation -- the caller must have verified @addr.
 */
77a439fe51SSam Ravnborg #define __put_user_nocheck(x, addr, size) ({			\
78a439fe51SSam Ravnborg 	register int __pu_ret;					\
79a439fe51SSam Ravnborg 	switch (size) {						\
800795cb1bSMichael S. Tsirkin 	case 1: __put_user_asm(x, b, addr, __pu_ret); break;	\
810795cb1bSMichael S. Tsirkin 	case 2: __put_user_asm(x, h, addr, __pu_ret); break;	\
820795cb1bSMichael S. Tsirkin 	case 4: __put_user_asm(x, , addr, __pu_ret); break;	\
830795cb1bSMichael S. Tsirkin 	case 8: __put_user_asm(x, d, addr, __pu_ret); break;	\
840795cb1bSMichael S. Tsirkin 	default: __pu_ret = __put_user_bad(); break;		\
858ccf7b25SMichael S. Tsirkin 	} \
868ccf7b25SMichael S. Tsirkin 	__pu_ret; \
878ccf7b25SMichael S. Tsirkin })
88a439fe51SSam Ravnborg 
/*
 * Emit a single user-space store "st<size> x, [addr]" (label 1) and
 * clear @ret on success.  A fault in the store is routed through the
 * __ex_table entry (".word 1b, 3b") to the fixup stub at label 3,
 * which sets ret = -EFAULT (the branch delay slot holds the mov) and
 * branches back to label 2.
 */
89a439fe51SSam Ravnborg #define __put_user_asm(x, size, addr, ret)				\
90a439fe51SSam Ravnborg __asm__ __volatile__(							\
91a439fe51SSam Ravnborg 		"/* Put user asm, inline. */\n"				\
92a439fe51SSam Ravnborg 	"1:\t"	"st"#size " %1, %2\n\t"					\
93a439fe51SSam Ravnborg 		"clr	%0\n"						\
94a439fe51SSam Ravnborg 	"2:\n\n\t"							\
95a439fe51SSam Ravnborg 		".section .fixup,#alloc,#execinstr\n\t"			\
96a439fe51SSam Ravnborg 		".align	4\n"						\
97a439fe51SSam Ravnborg 	"3:\n\t"							\
98a439fe51SSam Ravnborg 		"b	2b\n\t"						\
99a439fe51SSam Ravnborg 		" mov	%3, %0\n\t"					\
100a439fe51SSam Ravnborg 		".previous\n\n\t"					\
101a439fe51SSam Ravnborg 		".section __ex_table,#alloc\n\t"			\
102a439fe51SSam Ravnborg 		".align	4\n\t"						\
103a439fe51SSam Ravnborg 		".word	1b, 3b\n\t"					\
104a439fe51SSam Ravnborg 		".previous\n\n\t"					\
105a439fe51SSam Ravnborg 	       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),		\
106a439fe51SSam Ravnborg 		 "i" (-EFAULT))
107a439fe51SSam Ravnborg 
/* Out-of-line error stub; its return value becomes the result of a
 * put_user with an unsupported access size. */
108f05a6865SSam Ravnborg int __put_user_bad(void);
109a439fe51SSam Ravnborg 
/*
 * Range-checked load: validates [addr, addr + size) with
 * __access_ok(), loads into __gu_val with the size-matched load
 * suffix (ub/uh/<word>/d -- see __get_user_asm()), then casts the
 * result to @type into @x.  @x is zeroed on every failure path.
 * Evaluates to 0 on success, -EFAULT when the range check fails, or
 * __get_user_bad()'s result for an unsupported size.
 */
110a439fe51SSam Ravnborg #define __get_user_check(x, addr, size, type) ({ \
111a439fe51SSam Ravnborg 	register int __gu_ret; \
112a439fe51SSam Ravnborg 	register unsigned long __gu_val; \
113a439fe51SSam Ravnborg 	if (__access_ok(addr, size)) { \
114a439fe51SSam Ravnborg 		switch (size) { \
1158ccf7b25SMichael S. Tsirkin 		case 1: \
1168ccf7b25SMichael S. Tsirkin 			 __get_user_asm(__gu_val, ub, addr, __gu_ret); \
1178ccf7b25SMichael S. Tsirkin 			break; \
1188ccf7b25SMichael S. Tsirkin 		case 2: \
1198ccf7b25SMichael S. Tsirkin 			__get_user_asm(__gu_val, uh, addr, __gu_ret); \
1208ccf7b25SMichael S. Tsirkin 			break; \
1218ccf7b25SMichael S. Tsirkin 		case 4: \
1228ccf7b25SMichael S. Tsirkin 			__get_user_asm(__gu_val, , addr, __gu_ret); \
1238ccf7b25SMichael S. Tsirkin 			break; \
1248ccf7b25SMichael S. Tsirkin 		case 8: \
1258ccf7b25SMichael S. Tsirkin 			__get_user_asm(__gu_val, d, addr, __gu_ret); \
1268ccf7b25SMichael S. Tsirkin 			break; \
1278ccf7b25SMichael S. Tsirkin 		default: \
1288ccf7b25SMichael S. Tsirkin 			__gu_val = 0; \
1298ccf7b25SMichael S. Tsirkin 			__gu_ret = __get_user_bad(); \
1308ccf7b25SMichael S. Tsirkin 			break; \
1318ccf7b25SMichael S. Tsirkin 		} \
1328ccf7b25SMichael S. Tsirkin 	 } else { \
1338ccf7b25SMichael S. Tsirkin 		 __gu_val = 0; \
1348ccf7b25SMichael S. Tsirkin 		 __gu_ret = -EFAULT; \
1358ccf7b25SMichael S. Tsirkin 	} \
1368ccf7b25SMichael S. Tsirkin 	x = (__force type) __gu_val; \
1378ccf7b25SMichael S. Tsirkin 	__gu_ret; \
1388ccf7b25SMichael S. Tsirkin })
139a439fe51SSam Ravnborg 
/*
 * Unchecked load: same size dispatch and @x assignment as
 * __get_user_check() but no __access_ok() validation -- the caller
 * must have verified @addr.
 */
140a439fe51SSam Ravnborg #define __get_user_nocheck(x, addr, size, type) ({			\
141a439fe51SSam Ravnborg 	register int __gu_ret;						\
142a439fe51SSam Ravnborg 	register unsigned long __gu_val;				\
143a439fe51SSam Ravnborg 	switch (size) {							\
1440795cb1bSMichael S. Tsirkin 	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
1450795cb1bSMichael S. Tsirkin 	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
1460795cb1bSMichael S. Tsirkin 	case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break;	\
1470795cb1bSMichael S. Tsirkin 	case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break;	\
1488ccf7b25SMichael S. Tsirkin 	default:							\
1498ccf7b25SMichael S. Tsirkin 		__gu_val = 0;						\
1508ccf7b25SMichael S. Tsirkin 		__gu_ret = __get_user_bad();				\
1518ccf7b25SMichael S. Tsirkin 		break;							\
1528ccf7b25SMichael S. Tsirkin 	}								\
1538ccf7b25SMichael S. Tsirkin 	x = (__force type) __gu_val;					\
1548ccf7b25SMichael S. Tsirkin 	__gu_ret;							\
1558ccf7b25SMichael S. Tsirkin })
156a439fe51SSam Ravnborg 
/*
 * Emit a single user-space load "ld<size> [addr], x" (label 1) and
 * clear @ret on success.  A fault in the load is routed through the
 * __ex_table entry (".word 1b, 3b") to the fixup stub at label 3,
 * which zeroes the destination, sets ret = -EFAULT (in the branch
 * delay slot) and branches back to label 2.
 */
157a439fe51SSam Ravnborg #define __get_user_asm(x, size, addr, ret)				\
158a439fe51SSam Ravnborg __asm__ __volatile__(							\
159a439fe51SSam Ravnborg 		"/* Get user asm, inline. */\n"				\
160a439fe51SSam Ravnborg 	"1:\t"	"ld"#size " %2, %1\n\t"					\
161a439fe51SSam Ravnborg 		"clr	%0\n"						\
162a439fe51SSam Ravnborg 	"2:\n\n\t"							\
163a439fe51SSam Ravnborg 		".section .fixup,#alloc,#execinstr\n\t"			\
164a439fe51SSam Ravnborg 		".align	4\n"						\
165a439fe51SSam Ravnborg 	"3:\n\t"							\
166a439fe51SSam Ravnborg 		"clr	%1\n\t"						\
167a439fe51SSam Ravnborg 		"b	2b\n\t"						\
168a439fe51SSam Ravnborg 		" mov	%3, %0\n\n\t"					\
169a439fe51SSam Ravnborg 		".previous\n\t"						\
170a439fe51SSam Ravnborg 		".section __ex_table,#alloc\n\t"			\
171a439fe51SSam Ravnborg 		".align	4\n\t"						\
172a439fe51SSam Ravnborg 		".word	1b, 3b\n\n\t"					\
173a439fe51SSam Ravnborg 		".previous\n\t"						\
174a439fe51SSam Ravnborg 	       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),		\
175a439fe51SSam Ravnborg 		 "i" (-EFAULT))
176a439fe51SSam Ravnborg 
/* Out-of-line error stub; its return value becomes the result of a
 * get_user with an unsupported access size. */
177f05a6865SSam Ravnborg int __get_user_bad(void);
178a439fe51SSam Ravnborg 
/* Assembly copy routine shared by raw_copy_{to,from}_user() below.
 * NOTE(review): return-value semantics (presumably bytes NOT copied)
 * are not visible here -- confirm in the sparc lib implementation. */
179f05a6865SSam Ravnborg unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
180a439fe51SSam Ravnborg 
/*
 * Copy @n bytes from the kernel buffer @from to user space @to.
 * Performs no access_ok() validation itself; simply forwards to
 * __copy_user() after force-casting the kernel pointer to the user
 * address space.  Returns whatever __copy_user() reports
 * (NOTE(review): presumably the byte count not copied -- confirm).
 */
static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	void __user *src = (__force void __user *) from;

	return __copy_user(to, src, n);
}
185a439fe51SSam Ravnborg 
/*
 * Copy @n bytes from user space @from into the kernel buffer @to.
 * Performs no access_ok() validation itself; simply forwards to
 * __copy_user() after force-casting the kernel pointer to the user
 * address space.  Returns whatever __copy_user() reports
 * (NOTE(review): presumably the byte count not copied -- confirm).
 */
static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	void __user *dst = (__force void __user *) to;

	return __copy_user(dst, from, n);
}
190a439fe51SSam Ravnborg 
/* NOTE(review): presumably these ask the generic uaccess layer to
 * inline copy_{from,to}_user() around the raw_copy_* helpers above --
 * confirm against include/linux/uaccess.h. */
19131af2f36SAl Viro #define INLINE_COPY_FROM_USER
19231af2f36SAl Viro #define INLINE_COPY_TO_USER
193a439fe51SSam Ravnborg 
/*
 * Zero @size bytes at user address @addr with no __access_ok() check
 * (clear_user() below performs the check).  Tail of the work is done
 * by the assembly routine __bzero, called with @addr in %o0 and @size
 * in %o1; the value __bzero leaves in %o0 is returned.  The wide
 * clobber list covers registers __bzero may touch (NOTE(review):
 * inferred from the explicit clobbers -- confirm against the sparc
 * __bzero implementation).
 */
__clear_user(void __user * addr,unsigned long size)194a439fe51SSam Ravnborg static inline unsigned long __clear_user(void __user *addr, unsigned long size)
195a439fe51SSam Ravnborg {
196a439fe51SSam Ravnborg 	unsigned long ret;
197a439fe51SSam Ravnborg 
198a439fe51SSam Ravnborg 	__asm__ __volatile__ (
199a439fe51SSam Ravnborg 		"mov %2, %%o1\n"
200a439fe51SSam Ravnborg 		"call __bzero\n\t"
201a439fe51SSam Ravnborg 		" mov %1, %%o0\n\t"
202a439fe51SSam Ravnborg 		"mov %%o0, %0\n"
203a439fe51SSam Ravnborg 		: "=r" (ret) : "r" (addr), "r" (size) :
204a439fe51SSam Ravnborg 		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
205a439fe51SSam Ravnborg 		"g1", "g2", "g3", "g4", "g5", "g7", "cc");
206a439fe51SSam Ravnborg 
207a439fe51SSam Ravnborg 	return ret;
208a439fe51SSam Ravnborg }
209a439fe51SSam Ravnborg 
/*
 * Zero @n bytes of user memory at @addr.
 * The range is first validated with __access_ok(); a zero count or a
 * failed check returns the full count @n (nothing cleared), otherwise
 * the result of __clear_user() is returned.
 */
static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (!n || !__access_ok(addr, n))
		return n;

	return __clear_user(addr, n);
}
217a439fe51SSam Ravnborg 
/* Out-of-line length scan of a NUL-terminated user string.
 * NOTE(review): exact bound/failure semantics are not visible in this
 * header -- see the sparc lib implementation. */
218f05a6865SSam Ravnborg __must_check long strnlen_user(const char __user *str, long n);
219a439fe51SSam Ravnborg 
220a439fe51SSam Ravnborg #endif /* _ASM_UACCESS_H */
221