/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions; these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/string.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <asm/unaligned.h>

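/*
 * With CONFIG_UACCESS_MEMCPY, user pointers can be dereferenced directly,
 * so the single-value accessors below are plain loads and stores, using
 * get_unaligned()/put_unaligned() for the sizes that may be misaligned.
 */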
static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = *((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
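/*
 * The self-referential define marks __get_user_fn() as provided, so the
 * generic raw_copy_from_user() based fallback later in this file is not
 * used.  The same trick is applied to __put_user_fn() below.
 */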
#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 __force *)to = *(u8 *)from;
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

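/*
 * Probing kernel memory: with the memcpy-based implementation these are
 * plain (unaligned-safe) accesses, so err_label is referenced only to
 * keep the compiler from warning about an unused label.
 */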
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)(dst)) = get_unaligned((type *)(src));		\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)(src)), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define HAVE_GET_KERNEL_NOFAULT 1

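/*
 * Bulk copies: kernel and user share one address space here, so these
 * reduce to memcpy() and always report zero bytes left uncopied.
 */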
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */

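/*
 * Legacy set_fs() support: the per-thread addr_limit selects whether
 * "user" accesses may reach the whole address space (KERNEL_DS) or only
 * the user portion below TASK_SIZE (USER_DS).  A historical usage
 * pattern, as a sketch only:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... perform uaccess on a kernel buffer ...
 *	set_fs(old_fs);
 */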
#ifdef CONFIG_SET_FS
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef uaccess_kernel
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif

#ifndef user_addr_max
#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
#endif

#endif /* CONFIG_SET_FS */

#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size))

/*
 * The architecture should really override this if possible, at least
 * doing a check against get_fs().
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif

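/*
 * A hypothetical override (sketch only, not taken from any particular
 * architecture, and deliberately conservative): bound the access against
 * the current segment limit while rejecting ranges that would wrap
 * around the address space:
 *
 *	#define __access_ok(addr, size) \
 *		((addr) <= get_fs().seg && (size) <= get_fs().seg - (addr))
 */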
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*(ptr))) ?			\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
		-EFAULT;					\
})

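/*
 * Example (sketch only): returning a value to user space from a
 * hypothetical ioctl handler; the names here are illustrative, not part
 * of this API:
 *
 *	int __user *argp = (int __user *)arg;
 *	int val = 42;
 *
 *	if (put_user(val, argp))
 *		return -EFAULT;
 */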
#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*(ptr))) ?			\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
		((x) = (__typeof__(*(ptr)))0, -EFAULT);		\
})

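/*
 * Example (sketch only): fetching a value from a user pointer, again
 * with illustrative names; on failure the destination is zeroed:
 *
 *	int __user *argp = (int __user *)arg;
 *	int val;
 *
 *	if (get_user(val, argp))
 *		return -EFAULT;
 */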
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

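/*
 * clear_user() zeroes @n bytes of user memory at @to.  It returns the
 * number of bytes that could not be cleared (all of @n if the
 * access_ok() check fails), or 0 on success.
 */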
static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}

#include <asm/extable.h>

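/*
 * Out-of-line string helpers: strncpy_from_user() returns the length of
 * the copied string excluding the trailing NUL, or -EFAULT on an invalid
 * address; strnlen_user() returns the string length including the
 * terminating NUL, or 0 on an invalid address.
 */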
__must_check long strncpy_from_user(char *dst, const char __user *src,
				    long count);
__must_check long strnlen_user(const char __user *src, long n);

#endif /* __ASM_GENERIC_UACCESS_H */