xref: /openbmc/linux/arch/s390/include/asm/uaccess.h (revision 2f0754f2)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#include <asm/extable.h>
#include <asm/facility.h>

void debug_user_asce(int exit);

static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(addr, size) __access_ok(addr, size)
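/*
 * Note: __range_ok() unconditionally succeeds because user space is
 * accessed through its own (secondary) address space on s390; a user
 * pointer can never alias kernel memory, so there is no address range
 * to check. access_ok() therefore boils down to the sparse type check.
 */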

unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif

int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));
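/*
 * With INLINE_COPY_{FROM,TO}_USER defined, the generic uaccess code
 * provides inline copy_from_user()/copy_to_user() wrappers around the
 * raw_copy_*() routines above; without them (KASAN builds) the
 * out-of-line library versions are used instead.
 *
 * __put_user_bad()/__get_user_bad() exist only to flag accesses of
 * unsupported sizes; they are never expected to be reached on a
 * correct build.
 */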

#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac1;
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac2;
	};
};

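/*
 * The OAC word above is loaded into register 0 before MVCOS (MOVE WITH
 * OPTIONAL SPECIFICATIONS): oac1 describes operand 1 (the destination),
 * oac2 describes operand 2 (the source). The 'a' bit makes the 'as'
 * (address space) field effective, the 'k' bit does the same for 'key'.
 * Selecting PSW_BITS_AS_SECONDARY for exactly one operand is what routes
 * that operand through the user address space.
 *
 * The exception table entries below redirect a fault at label 0: or 1:
 * to the fixup code at 3:, which makes the macro return -EFAULT.
 */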
#define __put_get_user_asm(to, from, size, oac_spec)			\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	%[_to],%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3:	lhi	%[rc],%[retval]\n"			\
		"	jg	2b\n"					\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [retval] "K" (-EFAULT), [spec] "d" (oac_spec.val)	\
		: "cc", "0");						\
	__rc;								\
})

#define __put_user_asm(to, from, size)				\
	__put_get_user_asm(to, from, size, ((union oac) {	\
		.oac1.as = PSW_BITS_AS_SECONDARY,		\
		.oac1.a = 1					\
	}))

#define __get_user_asm(to, from, size)				\
	__put_get_user_asm(to, from, size, ((union oac) {	\
		.oac2.as = PSW_BITS_AS_SECONDARY,		\
		.oac2.a = 1					\
	}))

static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_user_asm((unsigned char __user *)ptr,
				    (unsigned char *)x,
				    size);
		break;
	case 2:
		rc = __put_user_asm((unsigned short __user *)ptr,
				    (unsigned short *)x,
				    size);
		break;
	case 4:
		rc = __put_user_asm((unsigned int __user *)ptr,
				    (unsigned int *)x,
				    size);
		break;
	case 8:
		rc = __put_user_asm((unsigned long __user *)ptr,
				    (unsigned long *)x,
				    size);
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}

static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __get_user_asm((unsigned char *)x,
				    (unsigned char __user *)ptr,
				    size);
		break;
	case 2:
		rc = __get_user_asm((unsigned short *)x,
				    (unsigned short __user *)ptr,
				    size);
		break;
	case 4:
		rc = __get_user_asm((unsigned int *)x,
				    (unsigned int __user *)ptr,
				    size);
		break;
	case 8:
		rc = __get_user_asm((unsigned long *)x,
				    (unsigned long __user *)ptr,
				    size);
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}

#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = raw_copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = raw_copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})
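
/*
 * Example (illustrative only, not part of the interface defined here):
 * typical single-value transfers from a hypothetical ioctl handler.
 *
 *	static int frob_flag(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		val |= 1;
 *		return put_user(val, uptr);
 *	}
 */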

/*
 * Copy a null terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

long __must_check strnlen_user(const char __user *src, long count);
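/*
 * strncpy_from_user() returns the length of the string placed in dst
 * (not counting the trailing NUL), or -EFAULT if the user memory could
 * not be accessed; at most 'count' bytes are copied. strnlen_user()
 * returns the string length including the terminating NUL, 0 on
 * exception, or a value larger than 'count' if no NUL was found within
 * the limit.
 */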

/*
 * Zero userspace memory. Both return the number of bytes that could
 * not be cleared (0 on success).
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void *s390_kernel_write(void *dst, const void *src, size_t size);

#define HAVE_GET_KERNEL_NOFAULT
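/*
 * HAVE_GET_KERNEL_NOFAULT tells the generic maccess code that this
 * architecture supplies its own __get_kernel_nofault() and
 * __put_kernel_nofault() (below) for copy_{from,to}_kernel_nofault(),
 * instead of routing those accesses through the user access primitives.
 */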

int __noreturn __put_kernel_bad(void);

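/*
 * __put_kernel_asm() stores 'val' to a kernel address with a size-specific
 * store instruction ("stc", "sth", "st" or "stg"); the exception table
 * entries turn a fault on the store into an -EFAULT return instead of an
 * oops, which is what the *_nofault() accessors below rely on.
 */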
#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %2,%1\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3:	lhi	%0,%3\n"				\
		"	jg	2b\n"					\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+Q" (*(to))				\
		: "d" (val), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	u64 __x = (u64)(*((type *)(src)));				\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)

int __noreturn __get_kernel_bad(void);

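/*
 * __get_kernel_asm() is the load-side counterpart: it reads from a kernel
 * address with "ic", "lh", "l" or "lg" and likewise converts a faulting
 * access into an -EFAULT return via the exception table.
 */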
#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %1,%2\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3:	lhi	%0,%3\n"				\
		"	jg	2b\n"					\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+d" (val)				\
		: "Q" (*(from)), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		u8 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 2: {							\
		u16 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 4: {							\
		u32 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 8: {							\
		u64 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
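
/*
 * Example (illustrative only): how a caller such as the generic
 * copy_from_kernel_nofault() uses the err_label parameter. The function
 * name below is hypothetical.
 *
 *	static long peek_kernel_long(long *dst, const long *src)
 *	{
 *		__get_kernel_nofault(dst, src, long, Efault);
 *		return 0;
 *	Efault:
 *		return -EFAULT;
 *	}
 */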

#endif /* __S390_UACCESS_H */