xref: /openbmc/linux/arch/m68k/include/asm/uaccess.h (revision bef7a78d)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H

#ifdef CONFIG_MMU

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/segment.h>
#include <asm/extable.h>

/* We let the MMU do all checking */
static inline int access_ok(const void __user *addr,
			    unsigned long size)
{
	return 1;
}
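
/*
 * Editor's sketch (not in the original header): because access_ok()
 * always succeeds here, bad user pointers are caught only when the
 * access itself faults and the __ex_table fixups below run. A
 * hypothetical caller still follows the usual pattern:
 *
 *	u32 val;
 *	if (!access_ok(uptr, sizeof(val)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 *
 * The __get_user() is where the real check happens; uptr is a
 * hypothetical user pointer.
 */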

/*
 * Not all variants of the 68k family support the notion of address spaces.
 * The traditional 680x0 parts do, and they use the sfc/dfc registers and
 * the "moves" instruction to access user space from kernel space. Other
 * family members like ColdFire don't support this, and only have a single
 * address space, and use the usual "move" instruction for user space access.
 *
 * Outside of this difference the user space access functions are the same.
 * So let's keep the code simple and just define what we need to use.
 */
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define	MOVES	"moves"
#else
#define	MOVES	"move"
#endif
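
/*
 * Editor's illustration: MOVES is spliced into the asm templates by
 * string pasting, so
 *
 *	"1:	"MOVES".l	%2,%1\n"
 *
 * assembles as "moves.l %2,%1" when CONFIG_CPU_HAS_ADDRESS_SPACES is set
 * (the sfc/dfc registers select the user address space) and as a plain
 * "move.l %2,%1" on single-address-space parts such as ColdFire.
 */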

extern int __put_user_bad(void);
extern int __get_user_bad(void);

#define __put_user_asm(res, x, ptr, bwl, reg, err)	\
asm volatile ("\n"					\
	"1:	"MOVES"."#bwl"	%2,%1\n"		\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.even\n"				\
	"10:	moveq.l	%3,%0\n"			\
	"	jra 2b\n"				\
	"	.previous\n"				\
	"\n"						\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b,10b\n"			\
	"	.long	2b,10b\n"			\
	"	.previous"				\
	: "+d" (res), "=m" (*(ptr))			\
	: #reg (x), "i" (err))
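
/*
 * How the fixup works (editor's sketch, not normative): on a faulting
 * user access the exception handler searches __ex_table for the faulting
 * PC and resumes at the paired fixup address. Both 1b and 2b map to 10b
 * (the 2b entry presumably covers write faults reported on the following
 * instruction), and the fixup loads the error code and skips the store:
 *
 *	int err = 0;
 *	__put_user_asm(err, 42, (int __user *)uptr, l, r, -EFAULT);
 *
 * err stays 0 on success and becomes -EFAULT if the store faulted;
 * uptr is a hypothetical user pointer.
 */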

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define __put_user(x, ptr)						\
({									\
	typeof(*(ptr)) __pu_val = (x);					\
	int __pu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT);	\
		break;							\
	case 4:								\
		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
		break;							\
	case 8:								\
	    {								\
		const void __user *__pu_ptr = (ptr);			\
		asm volatile ("\n"					\
			"1:	"MOVES".l	%2,(%1)+\n"		\
			"2:	"MOVES".l	%R2,(%1)\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	movel %3,%0\n"				\
			"	jra 3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align 4\n"				\
			"	.long 1b,10b\n"				\
			"	.long 2b,10b\n"				\
			"	.long 3b,10b\n"				\
			"	.previous"				\
			: "+d" (__pu_err), "+a" (__pu_ptr)		\
			: "r" (__pu_val), "i" (-EFAULT)			\
			: "memory");					\
		break;							\
	    }								\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})
#define put_user(x, ptr)	__put_user(x, ptr)
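
/*
 * Usage sketch (editor's illustration): the switch on sizeof(*(ptr))
 * means one macro serves 1-, 2-, 4- and 8-byte stores:
 *
 *	u64 __user *p = ...;
 *	if (put_user(0xdeadbeefcafeULL, p))
 *		return -EFAULT;
 *
 * This expands to the case-8 double-long asm above. Note that on this
 * port put_user() is simply __put_user(); there is no separate
 * access_ok() step because the MMU does all checking.
 */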


#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({		\
	type __gu_val;							\
	asm volatile ("\n"						\
		"1:	"MOVES"."#bwl"	%2,%1\n"			\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	move.l	%3,%0\n"				\
		"	sub.l	%1,%1\n"				\
		"	jra	2b\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10b\n"				\
		"	.previous"					\
		: "+d" (res), "=&" #reg (__gu_val)			\
		: "m" (*(ptr)), "i" (err));				\
	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;	\
})

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);	\
		break;							\
	case 4:								\
		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
		break;							\
	case 8: {							\
		const void __user *__gu_ptr = (ptr);			\
		union {							\
			u64 l;						\
			__typeof__(*(ptr)) t;				\
		} __gu_val;						\
		asm volatile ("\n"					\
			"1:	"MOVES".l	(%2)+,%1\n"		\
			"2:	"MOVES".l	(%2),%R1\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	move.l	%3,%0\n"			\
			"	sub.l	%1,%1\n"			\
			"	sub.l	%R1,%R1\n"			\
			"	jra	3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align	4\n"				\
			"	.long	1b,10b\n"			\
			"	.long	2b,10b\n"			\
			"	.previous"				\
			: "+d" (__gu_err), "=&r" (__gu_val.l),		\
			  "+a" (__gu_ptr)				\
			: "i" (-EFAULT)					\
			: "memory");					\
		(x) = __gu_val.t;					\
		break;							\
	}								\
	default:							\
		__gu_err = __get_user_bad();				\
		break;							\
	}								\
	__gu_err;							\
})
#define get_user(x, ptr) __get_user(x, ptr)
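
/*
 * Usage sketch (editor's illustration): as with put_user(), the pointer
 * type selects the transfer size, and the fixup code zeroes the value
 * with sub.l so callers never see stale data after a fault:
 *
 *	u16 flags;
 *	if (get_user(flags, (u16 __user *)argp))
 *		return -EFAULT;
 *
 * argp is a hypothetical user pointer; this picks the 2-byte case.
 */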

unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);

#define __suffix0
#define __suffix1 b
#define __suffix2 w
#define __suffix4 l

#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	asm volatile ("\n"						\
		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
		"	move."#s1"	%3,(%1)+\n"			\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
		"	move."#s2"	%3,(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
		"	move."#s3"	%3,(%1)+\n"			\
		"	.endif\n"					\
		"	.endif\n"					\
		"4:\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10f\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"	.long	2b,20f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	3b,30f\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	addq.l #"#n1",%0\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"20:	addq.l #"#n2",%0\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"30:	addq.l #"#n3",%0\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,  \
					__suffix##n1, __suffix##n2, __suffix##n3)
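
/*
 * Expansion sketch (editor's illustration): the extra macro level exists
 * so that __suffix##n is expanded before the final paste. For example
 *
 *	__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1)
 *
 * becomes
 *
 *	____constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1, l, w, b)
 *
 * i.e. one long, one word and one byte move. A count of 0 pastes the
 * empty __suffix0 and the .ifnc directives assemble nothing for that
 * slot. On a fault the fixup falls through from the label of the
 * faulting move, accumulating the byte counts of that move and all
 * later ones, so res ends up as the number of bytes not copied.
 */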

static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
		break;
	case 2:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
		break;
	case 3:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
		break;
	case 4:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
		break;
	case 5:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
		break;
	case 6:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
		break;
	case 7:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
		break;
	case 8:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
		break;
	case 9:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
		break;
	case 10:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
		break;
	case 12:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
		break;
	default:
		/* we limit the inlined version to 3 moves */
		return __generic_copy_from_user(to, from, n);
	}

	return res;
}
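
/*
 * Worked example (editor's illustration): a constant 7-byte copy takes
 * the "4, 2, 1" case above, so it is one long, one word and one byte
 * MOVES with no loop. Sizes such as 11 have no case because they would
 * need a fourth move, and fall back to __generic_copy_from_user():
 *
 *	char buf[7];
 *	unsigned long left = __constant_copy_from_user(buf, uptr, 7);
 *
 * left is 0 on success, or the number of bytes not copied on a fault;
 * uptr is a hypothetical user pointer.
 */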

#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
	asm volatile ("\n"						\
		"	move."#s1"	(%2)+,%3\n"			\
		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
		"12:	move."#s2"	(%2)+,%3\n"			\
		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
		"22:\n"							\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	move."#s3"	(%2)+,%3\n"			\
		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
		"32:\n"							\
		"	.endif\n"					\
		"4:\n"							\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	11b,5f\n"				\
		"	.long	12b,5f\n"				\
		"	.long	21b,5f\n"				\
		"	.long	22b,5f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	31b,5f\n"				\
		"	.long	32b,5f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"5:	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
		break;
	case 2:
		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* limit the inlined version to 3 moves */
		return __generic_copy_to_user(to, from, n);
	}

	return res;
}
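
/*
 * Editor's note on the error convention: for sizes 1, 2 and 4 the code
 * reuses __put_user_asm() but passes the byte count (1, 2 or 4) as the
 * "error" value, and the multi-move fixup above is a single
 * "moveq.l #n,%0". So unlike the from-user side, a faulting constant
 * to-user copy reports the whole length as uncopied:
 *
 *	unsigned long left = __constant_copy_to_user(uptr, buf, 7);
 *
 * left is 0 on success, or 7 if any of the three stores faulted
 * (uptr and buf are hypothetical).
 */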

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_from_user(to, from, n);
	return __generic_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_to_user(to, from, n);
	return __generic_copy_to_user(to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
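
/*
 * Call-chain sketch (editor's illustration): the generic
 * copy_{from,to}_user() helpers in linux/uaccess.h bottom out in these
 * raw_copy_*() routines, and the INLINE_COPY_* defines request that
 * they be inlined at each call site. With a compile-time constant size
 * the whole copy collapses to a few MOVES instructions:
 *
 *	u8 hdr[6];
 *	if (copy_from_user(hdr, uptr, sizeof(hdr)))
 *		return -EFAULT;
 *
 * sizeof(hdr) is constant, so this is the "4, 2, 0" case: one long and
 * one word move, no function call (uptr is a hypothetical user pointer).
 */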

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __clear_user(void __user *to, unsigned long n);

#define clear_user	__clear_user
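
/*
 * Usage sketch (editor's illustration): clear_user() zero-fills user
 * memory and, like the copy routines, returns the number of bytes that
 * could not be cleared:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *
 * ubuf and len are hypothetical.
 */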

#else /* !CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif

#endif /* __M68K_UACCESS_H */