xref: /openbmc/linux/arch/s390/include/asm/uaccess.h (revision 8ffdff6a)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com),
6  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
7  *
8  *  Derived from "include/asm-i386/uaccess.h"
9  */
10 #ifndef __S390_UACCESS_H
11 #define __S390_UACCESS_H
12 
13 /*
14  * User space memory access functions
15  */
16 #include <asm/processor.h>
17 #include <asm/ctl_reg.h>
18 #include <asm/extable.h>
19 #include <asm/facility.h>
20 
21 void debug_user_asce(int exit);
22 
/*
 * On s390 user space is reached through a separate address space (see
 * the MVCOS usage below) rather than a shared linear range, so there is
 * no address limit to enforce here: every (addr, size) pair is
 * considered valid and faults are caught at access time instead.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
27 
/*
 * __access_ok() runs the static (sparse) user-pointer check and then
 * delegates to __range_ok(), which unconditionally succeeds on s390.
 * Wrapped in a statement expression so it can be used where a value is
 * expected.
 */
#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

/* Generic entry point used by core code; no might_fault() at this level. */
#define access_ok(addr, size) __access_ok(addr, size)
35 
36 unsigned long __must_check
37 raw_copy_from_user(void *to, const void __user *from, unsigned long n);
38 
39 unsigned long __must_check
40 raw_copy_to_user(void __user *to, const void *from, unsigned long n);
41 
42 #ifndef CONFIG_KASAN
43 #define INLINE_COPY_FROM_USER
44 #define INLINE_COPY_TO_USER
45 #endif
46 
47 int __put_user_bad(void) __attribute__((noreturn));
48 int __get_user_bad(void) __attribute__((noreturn));
49 
50 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
51 
/*
 * Transfer "size" bytes between *from and *to with a single MVCOS
 * (move with optional specifications) instruction.  The access
 * specification is passed in register 0 ("spec"); the callers use it
 * to mark either the source or the destination as a user-space
 * operand (0x810000UL for puts, 0x81UL for gets - see below; confirm
 * against the z/Architecture MVCOS description).
 * On success the "xr %0,%0" zeroes the return code; if the mvcos or
 * that xr faults, the extable entries redirect to the fixup at label
 * 3, which loads -EFAULT and jumps back behind the sequence.
 * Evaluates to 0 on success, -EFAULT on fault.
 */
#define __put_get_user_asm(to, from, size, spec)		\
({								\
	register unsigned long __reg0 asm("0") = spec;		\
	int __rc;						\
								\
	asm volatile(						\
		"0:	mvcos	%1,%3,%2\n"			\
		"1:	xr	%0,%0\n"			\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3:	lhi	%0,%5\n"			\
		"	jg	2b\n"				\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "+Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
72 
/*
 * Store a size-byte value from kernel address "x" to user address
 * "ptr" via MVCOS.  spec = 0x810000UL presumably marks the destination
 * operand as a user-space access - TODO confirm against the MVCOS
 * specification.  The casts give the asm's "Q" constraints operands of
 * the exact width.  Only sizes 1/2/4/8 are supported; anything else
 * references the undefined __put_user_bad() and fails to link.
 * Returns 0 on success, -EFAULT on fault.
 */
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x810000UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char __user *)ptr,
					(unsigned char *)x,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short __user *)ptr,
					(unsigned short *)x,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int __user *)ptr,
					(unsigned int *)x,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long __user *)ptr,
					(unsigned long *)x,
					size, spec);
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}
105 
/*
 * Fetch a size-byte value from user address "ptr" into kernel address
 * "x" via MVCOS.  spec = 0x81UL presumably marks the source operand as
 * a user-space access - TODO confirm against the MVCOS specification.
 * The casts give the asm's "Q" constraints operands of the exact
 * width.  Only sizes 1/2/4/8 are supported; anything else references
 * the undefined __get_user_bad() and fails to link.
 * Returns 0 on success, -EFAULT on fault.
 */
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x81UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char *)x,
					(unsigned char __user *)ptr,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short *)x,
					(unsigned short __user *)ptr,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int *)x,
					(unsigned int __user *)ptr,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long *)x,
					(unsigned long __user *)ptr,
					size, spec);
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}
138 
139 #else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
140 
141 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
142 {
143 	size = raw_copy_to_user(ptr, x, size);
144 	return size ? -EFAULT : 0;
145 }
146 
147 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
148 {
149 	size = raw_copy_from_user(x, ptr, size);
150 	return size ? -EFAULT : 0;
151 }
152 
153 #endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
154 
155 /*
156  * These are the main single-value transfer routines.  They automatically
157  * use the right size if we just have the right pointer type.
158  */
/*
 * __put_user() - store a single value to user space without the
 * might_fault() debug check.  Only sizes 1/2/4/8 are handled; any
 * other size references the undefined __put_user_bad() and fails to
 * link.  Evaluates to 0 on success or -EFAULT on fault.
 * (Fixed: two lines were indented with spaces instead of tabs, and
 * "sizeof (" spacing did not match __get_user().)
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})
178 
/*
 * put_user() - store a value to user space from a context where
 * sleeping/faulting is allowed; adds the might_fault() debug
 * annotation on top of __put_user().
 */
#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})
184 
185 
/*
 * __get_user() - fetch a single value from user space without the
 * might_fault() debug check.  The bytes land in a zero-initialized
 * unsigned temporary of the right width and are then reinterpreted as
 * the target type, so (x) is assigned even on fault.  Sizes other
 * than 1/2/4/8 reference the undefined __get_user_bad() and fail to
 * link.  Evaluates to 0 on success or -EFAULT on fault.
 * (Fixed: stray ";" after each case compound statement.)
 */
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})
225 
/*
 * get_user() - fetch a value from user space from a context where
 * sleeping/faulting is allowed; adds the might_fault() debug
 * annotation on top of __get_user().
 */
#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})
231 
232 unsigned long __must_check
233 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
234 
235 /*
236  * Copy a null terminated string from userspace.
237  */
238 
239 long __strncpy_from_user(char *dst, const char __user *src, long count);
240 
241 static inline long __must_check
242 strncpy_from_user(char *dst, const char __user *src, long count)
243 {
244 	might_fault();
245 	return __strncpy_from_user(dst, src, count);
246 }
247 
248 unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);
249 
250 static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
251 {
252 	might_fault();
253 	return __strnlen_user(src, n);
254 }
255 
256 /*
257  * Zero Userspace
258  */
259 unsigned long __must_check __clear_user(void __user *to, unsigned long size);
260 
261 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
262 {
263 	might_fault();
264 	return __clear_user(to, n);
265 }
266 
267 int copy_to_user_real(void __user *dest, void *src, unsigned long count);
268 void *s390_kernel_write(void *dst, const void *src, size_t size);
269 
270 #define HAVE_GET_KERNEL_NOFAULT
271 
272 int __noreturn __put_kernel_bad(void);
273 
/*
 * Store "val" to the kernel address "to" with the given store
 * instruction ("stc"/"sth"/"st"/"stg" - chosen by the caller to match
 * the operand width).  On success the "xr %0,%0" zeroes the return
 * code; a fault on the store or that xr is redirected by the extable
 * entries to the fixup, which yields -EFAULT instead of an oops.
 * Evaluates to 0 on success, -EFAULT on fault.
 */
#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %2,%1\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3:	lhi	%0,%3\n"				\
		"	jg	2b\n"					\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+Q" (*(to))				\
		: "d" (val), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})
292 
/*
 * Store a value of the given type read from kernel memory "src" to
 * kernel address "dst", branching to err_label instead of oopsing if
 * the store faults.  The value is widened through a u64 temporary and
 * the store instruction is selected by sizeof(type); unsupported
 * sizes reference the undefined __put_kernel_bad() and fail to link.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	u64 __x = (u64)(*((type *)(src)));				\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
318 
319 int __noreturn __get_kernel_bad(void);
320 
/*
 * Load one value from kernel address "from" into "val" with the given
 * load instruction ("ic"/"lh"/"l"/"lg" - chosen by the caller to match
 * the operand width).  On success the "xr %0,%0" zeroes the return
 * code; a fault on the load or that xr is redirected by the extable
 * entries to the fixup, which yields -EFAULT instead of an oops.
 * Evaluates to 0 on success, -EFAULT on fault.
 */
#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %1,%2\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3:	lhi	%0,%3\n"				\
		"	jg	2b\n"					\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+d" (val)				\
		: "Q" (*(from)), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})
339 
/*
 * Read a value of the given type from kernel address "src" into kernel
 * memory "dst", branching to err_label instead of oopsing if the load
 * faults.  The load goes through a zero-initialized fixed-width
 * temporary selected by sizeof(type); unsupported sizes reference the
 * undefined __get_kernel_bad() and fail to link.
 * (Fixed: stray ";" after each case compound statement, matching the
 * __put_kernel_nofault() style.)
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		u8 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 2: {							\
		u16 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 4: {							\
		u32 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 8: {							\
		u64 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
380 
381 #endif /* __S390_UACCESS_H */
382