xref: /openbmc/linux/arch/s390/include/asm/uaccess.h (revision 9659281c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com),
6  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
7  *
8  *  Derived from "include/asm-i386/uaccess.h"
9  */
10 #ifndef __S390_UACCESS_H
11 #define __S390_UACCESS_H
12 
13 /*
14  * User space memory access functions
15  */
16 #include <asm/processor.h>
17 #include <asm/ctl_reg.h>
18 #include <asm/extable.h>
19 #include <asm/facility.h>
20 
/*
 * Debug check of the user address space control element (ASCE) state;
 * @exit is presumably non-zero when called on the kernel-exit path.
 * NOTE(review): semantics inferred from the name — confirm at the
 * definition site.
 */
void debug_user_asce(int exit);
22 
/*
 * Address range check for user accesses.
 *
 * On s390 no limit check is needed at this level: user and kernel run
 * in separate address spaces (the user side is reached via the
 * secondary-space MVCOS accesses below), so every address/size pair is
 * accepted here.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	(void)addr;
	(void)size;
	return 1;
}
27 
/*
 * access_ok() - validate a user pointer/length pair.
 *
 * __chk_user_ptr() generates no code; it only lets sparse verify that
 * @addr carries the __user address-space tag.  The range check itself
 * always succeeds on s390 (see __range_ok()).
 */
#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(addr, size) __access_ok(addr, size)
35 
/*
 * Raw user copy primitives; by kernel convention they return the
 * number of bytes that could NOT be copied (0 on success).
 */
unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

/*
 * Allow the generic uaccess code to inline copy_{from,to}_user().
 * NOTE(review): not done on KASAN builds — presumably to keep the
 * uninstrumented raw copies out of instrumented callers; confirm.
 */
#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif

/*
 * Conventionally left undefined: a surviving reference for an
 * unsupported access size turns into a link-time error.
 */
int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));
49 
50 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
51 
/*
 * Single-value user space transfer built on the MVCOS instruction
 * ("move with optional specifications", available from z10 on).
 *
 * MVCOS takes an operand-access control in general register 0; the
 * spec value 0x81 selects the secondary (user) address space.  @insn
 * decides which operand the spec applies to:
 *   "llilh" 0,0x81 - spec lands in bits 32-47 (first-operand control):
 *                    the destination is in user space (put_user path)
 *   "lghi"  0,0x81 - spec lands in bits 48-63 (second-operand control):
 *                    the source is in user space (get_user path)
 * A fault at label 0 (the MVCOS) or label 1 is redirected by the
 * exception table to the fixup at label 3, which yields -EFAULT;
 * otherwise "xr %[rc],%[rc]" zeroes the return code.
 */
#define __put_get_user_asm(to, from, size, insn)		\
({								\
	int __rc;						\
								\
	asm volatile(						\
		insn "		0,%[spec]\n"			\
		"0:	mvcos	%[_to],%[_from],%[_size]\n"	\
		"1:	xr	%[rc],%[rc]\n"			\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3:	lhi	%[rc],%[retval]\n"		\
		"	jg	2b\n"				\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))		\
		: [_size] "d" (size), [_from] "Q" (*(from)),	\
		  [retval] "K" (-EFAULT), [spec] "K" (0x81UL)	\
		: "cc", "0");					\
	__rc;							\
})
72 
73 static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
74 {
75 	int rc;
76 
77 	switch (size) {
78 	case 1:
79 		rc = __put_get_user_asm((unsigned char __user *)ptr,
80 					(unsigned char *)x,
81 					size, "llilh");
82 		break;
83 	case 2:
84 		rc = __put_get_user_asm((unsigned short __user *)ptr,
85 					(unsigned short *)x,
86 					size, "llilh");
87 		break;
88 	case 4:
89 		rc = __put_get_user_asm((unsigned int __user *)ptr,
90 					(unsigned int *)x,
91 					size, "llilh");
92 		break;
93 	case 8:
94 		rc = __put_get_user_asm((unsigned long __user *)ptr,
95 					(unsigned long *)x,
96 					size, "llilh");
97 		break;
98 	default:
99 		__put_user_bad();
100 		break;
101 	}
102 	return rc;
103 }
104 
105 static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
106 {
107 	int rc;
108 
109 	switch (size) {
110 	case 1:
111 		rc = __put_get_user_asm((unsigned char *)x,
112 					(unsigned char __user *)ptr,
113 					size, "lghi");
114 		break;
115 	case 2:
116 		rc = __put_get_user_asm((unsigned short *)x,
117 					(unsigned short __user *)ptr,
118 					size, "lghi");
119 		break;
120 	case 4:
121 		rc = __put_get_user_asm((unsigned int *)x,
122 					(unsigned int __user *)ptr,
123 					size, "lghi");
124 		break;
125 	case 8:
126 		rc = __put_get_user_asm((unsigned long *)x,
127 					(unsigned long __user *)ptr,
128 					size, "lghi");
129 		break;
130 	default:
131 		__get_user_bad();
132 		break;
133 	}
134 	return rc;
135 }
136 
137 #else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
138 
139 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
140 {
141 	size = raw_copy_to_user(ptr, x, size);
142 	return size ? -EFAULT : 0;
143 }
144 
145 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
146 {
147 	size = raw_copy_from_user(x, ptr, size);
148 	return size ? -EFAULT : 0;
149 }
150 
151 #endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
152 
153 /*
154  * These are the main single-value transfer routines.  They automatically
155  * use the right size if we just have the right pointer type.
156  */
/*
 * __put_user() - write a value to user space, without might_fault().
 *
 * @x is copied into a correctly typed local first so it is evaluated
 * exactly once; the transfer size is taken from the pointer's type.
 * Only 1/2/4/8 byte accesses are valid — anything else becomes a link
 * error through __put_user_bad().  Evaluates to 0 on success or
 * -EFAULT on fault.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
        __chk_user_ptr(ptr);                                    \
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})
176 
/*
 * put_user() - write a value to user space.
 * Same as __put_user() plus the might_fault() annotation, which flags
 * sleeping-context violations on debug builds.
 */
#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})
182 
183 
/*
 * __get_user() - read a value from user space, without might_fault().
 *
 * The value is fetched into a zero-initialized local of the matching
 * size and then cast through the pointer's type, so @x is always
 * assigned a defined value (the pre-zeroed temporary) even when the
 * access faults.  Only 1/2/4/8 byte accesses are valid — anything else
 * becomes a link error through __get_user_bad().  Evaluates to 0 on
 * success or -EFAULT on fault.
 */
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})
223 
/*
 * get_user() - read a value from user space.
 * Same as __get_user() plus the might_fault() annotation, which flags
 * sleeping-context violations on debug builds.
 */
#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})
229 
/*
 * Copy between two user space buffers; by kernel convention returns
 * the number of bytes that could not be copied (0 on success).
 */
unsigned long __must_check
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
232 
233 /*
234  * Copy a null terminated string from userspace.
235  */
236 
237 long __strncpy_from_user(char *dst, const char __user *src, long count);
238 
239 static inline long __must_check
240 strncpy_from_user(char *dst, const char __user *src, long count)
241 {
242 	might_fault();
243 	return __strncpy_from_user(dst, src, count);
244 }
245 
246 unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);
247 
248 static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
249 {
250 	might_fault();
251 	return __strnlen_user(src, n);
252 }
253 
254 /*
255  * Zero Userspace
256  */
257 unsigned long __must_check __clear_user(void __user *to, unsigned long size);
258 
259 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
260 {
261 	might_fault();
262 	return __clear_user(to, n);
263 }
264 
/* Copy @count bytes to a user destination given as a real address —
 * NOTE(review): semantics inferred from the name, confirm at the
 * definition site. */
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
/* Write to kernel memory even when it is write-protected (e.g. for
 * code patching) — NOTE(review): confirm against arch/s390/mm. */
void *s390_kernel_write(void *dst, const void *src, size_t size);

/* This architecture implements __{get,put}_kernel_nofault() below. */
#define HAVE_GET_KERNEL_NOFAULT
269 
/* Conventionally left undefined: a surviving reference for an
 * unsupported access size turns into a link-time error. */
int __noreturn __put_kernel_bad(void);

/*
 * Store one 1/2/4/8 byte value to a kernel address that is allowed to
 * fault.  @insn is the store instruction matching the access size
 * (stc/sth/st/stg).  Label 0 is the store, label 1 the success path
 * zeroing the return code; a fault redirects via the exception table
 * to label 3, which yields -EFAULT.
 */
#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %2,%1\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3:	lhi	%0,%3\n"				\
		"	jg	2b\n"					\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+Q" (*(to))				\
		: "d" (val), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})
290 
/*
 * __put_kernel_nofault() - store to kernel memory without oopsing.
 * The source value is read through a correctly typed pointer and
 * widened to u64, then written with the store instruction matching
 * sizeof(type).  On a fault the macro branches to @err_label instead
 * of taking the normal fault path.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	u64 __x = (u64)(*((type *)(src)));				\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
316 
/* Conventionally left undefined: a surviving reference for an
 * unsupported access size turns into a link-time error. */
int __noreturn __get_kernel_bad(void);

/*
 * Load one 1/2/4/8 byte value from a kernel address that is allowed to
 * fault.  @insn is the load instruction matching the access size
 * (ic/lh/l/lg).  On success the return code is zeroed at label 1; a
 * fault redirects via the exception table to label 3, which yields
 * -EFAULT.  @val is a "+d" operand, so it keeps its caller-provided
 * (zero) value when the load faults.
 */
#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %1,%2\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3:	lhi	%0,%3\n"				\
		"	jg	2b\n"					\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+d" (val)				\
		: "Q" (*(from)), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})
337 
/*
 * __get_kernel_nofault() - load from kernel memory without oopsing.
 * Reads into a zero-initialized temporary of the access size using the
 * load instruction matching sizeof(type), casts the result back to
 * @type and stores it to @dst.  On a fault @dst receives the zeroed
 * temporary and control transfers to @err_label.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		u8 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 2: {							\
		u16 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 4: {							\
		u32 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 8: {							\
		u64 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
378 
379 #endif /* __S390_UACCESS_H */
380