xref: /openbmc/linux/arch/s390/include/asm/uaccess.h (revision e368cd72)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com),
6  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
7  *
8  *  Derived from "include/asm-i386/uaccess.h"
9  */
10 #ifndef __S390_UACCESS_H
11 #define __S390_UACCESS_H
12 
13 /*
14  * User space memory access functions
15  */
16 #include <asm/processor.h>
17 #include <asm/ctl_reg.h>
18 #include <asm/extable.h>
19 #include <asm/facility.h>
20 
/*
 * Dump/verify the user address-space-control state on kernel entry/exit
 * paths — NOTE(review): semantics inferred from the name (ASCE = address
 * space control element); definition lives elsewhere, confirm there.
 */
void debug_user_asce(int exit);
22 
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	/*
	 * No software range check is performed on s390: every
	 * (addr, size) pair is reported as accessible.  Faulting user
	 * accesses are handled at access time instead of up front.
	 */
	return 1;
}
27 
/*
 * access_ok() - validate a user space pointer/length pair.
 * __chk_user_ptr() is a compile-time (sparse) annotation check with no
 * run-time effect; the run-time part delegates to __range_ok(), which
 * accepts every range on this architecture.
 */
#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(addr, size) __access_ok(addr, size)
35 
/*
 * Out-of-line bulk user copy primitives (defined elsewhere).
 * Both return the number of bytes NOT copied; 0 means full success.
 */
unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

/*
 * Only advertise inline copy variants when KASAN is off — presumably to
 * keep the instrumentation out of the inlined fast path; TODO confirm.
 */
#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif

/*
 * Referenced for unsupported access sizes; deliberately left undefined
 * so such uses fail at link time.  Marked noreturn so callers need not
 * handle a return value after the call.
 */
int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));
49 
50 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
51 
/*
 * Copy a single 1/2/4/8 byte value to or from user space with MVCOS
 * (move with optional specifications).  "insn" loads general register 0
 * with the operand-access specification %[spec] (0x81UL): puts use
 * llilh and gets use lghi, so the spec lands in different halves of r0
 * and thereby selects which MVCOS operand is the user space one —
 * NOTE(review): exact spec encoding per the MVCOS definition in the
 * Principles of Operation; confirm there.
 * On success the result is 0 (xr clears %[rc]); a fault on either the
 * mvcos or the clearing instruction is fixed up at label 3: and yields
 * -EFAULT.  Register 0 and the condition code are clobbered.
 */
#define __put_get_user_asm(to, from, size, insn)		\
({								\
	int __rc;						\
								\
	asm volatile(						\
		insn "		0,%[spec]\n"			\
		"0:	mvcos	%[_to],%[_from],%[_size]\n"	\
		"1:	xr	%[rc],%[rc]\n"			\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3:	lhi	%[rc],%[retval]\n"		\
		"	jg	2b\n"				\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))		\
		: [_size] "d" (size), [_from] "Q" (*(from)),	\
		  [retval] "K" (-EFAULT), [spec] "K" (0x81UL)	\
		: "cc", "0");					\
	__rc;							\
})
72 
/*
 * Store a 1/2/4/8 byte scalar from kernel memory (*x) at the user
 * address ptr using the MVCOS-based helper above.
 * Returns 0 on success or -EFAULT on fault.  Any other size reaches the
 * noreturn __put_user_bad(), so rc is never read uninitialized.
 */
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char __user *)ptr,
					(unsigned char *)x,
					size, "llilh");
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short __user *)ptr,
					(unsigned short *)x,
					size, "llilh");
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int __user *)ptr,
					(unsigned int *)x,
					size, "llilh");
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long __user *)ptr,
					(unsigned long *)x,
					size, "llilh");
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}
104 
/*
 * Fetch a 1/2/4/8 byte scalar from the user address ptr into kernel
 * memory (*x) using the MVCOS-based helper above (lghi variant, i.e.
 * the source operand is the user space one).
 * Returns 0 on success or -EFAULT on fault.  Any other size reaches the
 * noreturn __get_user_bad(), so rc is never read uninitialized.
 */
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char *)x,
					(unsigned char __user *)ptr,
					size, "lghi");
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short *)x,
					(unsigned short __user *)ptr,
					size, "lghi");
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int *)x,
					(unsigned int __user *)ptr,
					size, "lghi");
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long *)x,
					(unsigned long __user *)ptr,
					size, "lghi");
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}
136 
137 #else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
138 
139 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
140 {
141 	size = raw_copy_to_user(ptr, x, size);
142 	return size ? -EFAULT : 0;
143 }
144 
145 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
146 {
147 	size = raw_copy_from_user(x, ptr, size);
148 	return size ? -EFAULT : 0;
149 }
150 
151 #endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
152 
153 /*
154  * These are the main single-value transfer routines.  They automatically
155  * use the right size if we just have the right pointer type.
156  */
/*
 * Store one scalar value to user space without an access_ok() check.
 * The value is first copied into a correctly-typed local so (x) is
 * evaluated exactly once.  Sizes other than 1/2/4/8 are rejected via
 * the noreturn, link-time-undefined __put_user_bad().
 * Evaluates to 0 on success or -EFAULT.
 *
 * Fix: the __chk_user_ptr() line was indented with spaces instead of
 * tabs, inconsistent with the rest of the file (kernel coding style).
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
								\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})
176 
/*
 * put_user() - store a scalar to user space; may fault and therefore
 * may sleep (might_fault() annotates/checks that context).
 * Evaluates to 0 on success or -EFAULT.
 */
#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})
182 
183 
/*
 * Fetch one scalar of 1/2/4/8 bytes from user space into (x) without an
 * access_ok() check.  The value is read into a zero-initialized
 * unsigned temporary and reinterpreted as the pointer's target type, so
 * (x) is always assigned — it is not left uninitialized on fault.
 * Other sizes are rejected via the noreturn __get_user_bad().
 * Evaluates to 0 on success or -EFAULT.
 *
 * Fix: dropped the stray ';' null statements that followed each case's
 * closing brace ("};"), which were unidiomatic no-ops.
 */
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})
223 
/*
 * get_user() - fetch a scalar from user space; may fault and therefore
 * may sleep (might_fault() annotates/checks that context).
 * Evaluates to 0 on success or -EFAULT.
 */
#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})
229 
/*
 * Copy directly between two user space buffers.  Returns the number of
 * bytes not copied; 0 on success.
 */
unsigned long __must_check
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);

/*
 * Copy a null terminated string from userspace.
 * Returns the length copied (excluding the terminator) or a negative
 * error — NOTE(review): exact contract per the generic kernel API;
 * definition lives elsewhere.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

/* Bounded strlen() on a user space string; defined elsewhere. */
long __must_check strnlen_user(const char __user *src, long count);

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);
244 
245 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
246 {
247 	might_fault();
248 	return __clear_user(to, n);
249 }
250 
/*
 * Copy from kernel memory to a user buffer addressed as real storage —
 * NOTE(review): semantics inferred from the name; confirm at the
 * definition site.
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
/* Write into (possibly write-protected) kernel memory; defined elsewhere. */
void *s390_kernel_write(void *dst, const void *src, size_t size);

/* This architecture supplies __get/__put_kernel_nofault() below. */
#define HAVE_GET_KERNEL_NOFAULT

/*
 * Referenced for unsupported __put_kernel_nofault() sizes; left
 * undefined so such uses fail at link time.
 */
int __noreturn __put_kernel_bad(void);
257 
/*
 * Store val at kernel address *to with a single store instruction
 * ("insn": stc/sth/st/stg chosen by the caller for the access size).
 * Faults on the store (or the clearing xr) are fixed up at label 3:
 * and turn into -EFAULT; success yields 0.
 */
#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %2,%1\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3:	lhi	%0,%3\n"				\
		"	jg	2b\n"					\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+Q" (*(to))				\
		: "d" (val), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})
276 
/*
 * Store one 1/2/4/8 byte scalar read from *src at the unchecked kernel
 * address dst, jumping to err_label instead of oopsing if the store
 * faults.  The value is widened to u64 first and the store instruction
 * is picked per size.  Other sizes are rejected via the undefined
 * __put_kernel_bad().
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	u64 __x = (u64)(*((type *)(src)));				\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
302 
/*
 * Referenced for unsupported __get_kernel_nofault() sizes; left
 * undefined so such uses fail at link time.
 */
int __noreturn __get_kernel_bad(void);

/*
 * Load from kernel address *from into val with a single load
 * instruction ("insn": ic/lh/l/lg chosen by the caller for the access
 * size).  Faults are fixed up at label 3: and turn into -EFAULT;
 * success yields 0.
 */
#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %1,%2\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3:	lhi	%0,%3\n"				\
		"	jg	2b\n"					\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+d" (val)				\
		: "Q" (*(from)), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})
323 
/*
 * Read one 1/2/4/8 byte scalar from the unchecked kernel address src
 * into *dst, jumping to err_label instead of oopsing if the access
 * faults.  The value is loaded into a zero-initialized fixed-width
 * temporary and then cast to the requested type.  Other sizes are
 * rejected via the undefined __get_kernel_bad().
 *
 * Fix: dropped the stray ';' null statements that followed each case's
 * closing brace ("};"), matching the clean style of
 * __put_kernel_nofault() above.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		u8 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 2: {							\
		u16 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 4: {							\
		u32 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 8: {							\
		u64 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
364 
365 #endif /* __S390_UACCESS_H */
366