xref: /openbmc/linux/arch/m68k/include/asm/uaccess.h (revision c4a11bf4)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H

#ifdef CONFIG_MMU

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/extable.h>

/* We let the MMU do all checking */
static inline int access_ok(const void __user *addr,
			    unsigned long size)
{
	/*
	 * XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check
	 * for TASK_SIZE!
	 */
	return 1;
}

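/*
 * Purely illustrative sketch (not this file's code) of the range check the
 * XXX note above asks for on single-address-space parts: reject accesses
 * that wrap or reach past TASK_SIZE, along the lines of
 *
 *	unsigned long a = (unsigned long)addr;
 *
 *	return size <= TASK_SIZE && a <= TASK_SIZE - size;
 */
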
/*
 * Not all variants of the 68k family support the notion of address spaces.
 * The traditional 680x0 parts do, and they use the sfc/dfc registers and
 * the "moves" instruction to access user space from kernel space. Other
 * family members like ColdFire don't support this, and only have a single
 * address space, and use the usual "move" instruction for user space access.
 *
 * Outside of this difference the user space access functions are the same.
 * So let's keep the code simple and just define what we need to use.
 */
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define	MOVES	"moves"
#else
#define	MOVES	"move"
#endif

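/*
 * For illustration: with CONFIG_CPU_HAS_ADDRESS_SPACES the byte case of
 * __put_user_asm() below emits "moves.b" (a user-space access via sfc/dfc),
 * while on ColdFire the very same macro emits a plain "move.b".  Only the
 * mnemonic differs; the fixup and exception-table glue is shared.
 */
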
#define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
asm volatile ("\n"					\
	"1:	"inst"."#bwl"	%2,%1\n"		\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.even\n"				\
	"10:	moveq.l	%3,%0\n"			\
	"	jra 2b\n"				\
	"	.previous\n"				\
	"\n"						\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b,10b\n"			\
	"	.long	2b,10b\n"			\
	"	.previous"				\
	: "+d" (res), "=m" (*(ptr))			\
	: #reg (x), "i" (err))

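/*
 * Reader's note on __put_user_asm() above: "bwl" selects the operand size
 * suffix (b/w/l), "reg" is the constraint letter used for the source value,
 * and "err" is what the .fixup code loads into "res" when the store at
 * label 1 faults; the __ex_table entries map the faulting address to that
 * fixup.
 */
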
#define __put_user_asm8(inst, res, x, ptr)			\
do {								\
	const void *__pu_ptr = (const void __force *)(ptr);	\
								\
	asm volatile ("\n"					\
		"1:	"inst".l %2,(%1)+\n"			\
		"2:	"inst".l %R2,(%1)\n"			\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.even\n"				\
		"10:	movel %3,%0\n"				\
		"	jra 3b\n"				\
		"	.previous\n"				\
		"\n"						\
		"	.section __ex_table,\"a\"\n"		\
		"	.align 4\n"				\
		"	.long 1b,10b\n"				\
		"	.long 2b,10b\n"				\
		"	.long 3b,10b\n"				\
		"	.previous"				\
		: "+d" (res), "+a" (__pu_ptr)			\
		: "r" (x), "i" (-EFAULT)			\
		: "memory");					\
} while (0)

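/*
 * The 64-bit store above is simply two consecutive 32-bit transfers through
 * a post-incremented address register; each half has its own exception-table
 * entry, and any fault lands on the shared fixup that sets res to -EFAULT.
 */
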
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define __put_user(x, ptr)						\
({									\
	typeof(*(ptr)) __pu_val = (x);					\
	int __pu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
		break;							\
	case 2:								\
		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
		break;							\
	case 4:								\
		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
		break;							\
	case 8:								\
		__put_user_asm8(MOVES, __pu_err, __pu_val, ptr);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__pu_err;							\
})
#define put_user(x, ptr)	__put_user(x, ptr)


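/*
 * Hedged usage example (illustrative names, not from this file): storing a
 * single value from an ioctl handler; put_user() returns 0 on success or
 * -EFAULT on a fault:
 *
 *	int __user *uarg = (int __user *)arg;
 *
 *	if (put_user(42, uarg))
 *		return -EFAULT;
 */
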
#define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({	\
	type __gu_val;							\
	asm volatile ("\n"						\
		"1:	"inst"."#bwl"	%2,%1\n"			\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	move.l	%3,%0\n"				\
		"	sub.l	%1,%1\n"				\
		"	jra	2b\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10b\n"				\
		"	.previous"					\
		: "+d" (res), "=&" #reg (__gu_val)			\
		: "m" (*(ptr)), "i" (err));				\
	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;	\
})

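/*
 * Note on the fixup in __get_user_asm() above: besides loading "err" into
 * "res", it zeroes the destination register ("sub.l %1,%1"), so a faulting
 * get_user() hands back 0 rather than stale data.
 */
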
#define __get_user_asm8(inst, res, x, ptr)				\
do {									\
	const void *__gu_ptr = (const void __force *)(ptr);		\
	union {								\
		u64 l;							\
		__typeof__(*(ptr)) t;					\
	} __gu_val;							\
									\
	asm volatile ("\n"						\
		"1:	"inst".l (%2)+,%1\n"				\
		"2:	"inst".l (%2),%R1\n"				\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	move.l	%3,%0\n"				\
		"	sub.l	%1,%1\n"				\
		"	sub.l	%R1,%R1\n"				\
		"	jra	3b\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10b\n"				\
		"	.long	2b,10b\n"				\
		"	.previous"					\
		: "+d" (res), "=&r" (__gu_val.l),			\
		  "+a" (__gu_ptr)					\
		: "i" (-EFAULT)						\
		: "memory");						\
	(x) = __gu_val.t;						\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
		break;							\
	case 2:								\
		__get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
		break;							\
	case 4:								\
		__get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
		break;							\
	case 8:								\
		__get_user_asm8(MOVES, __gu_err, x, ptr);		\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__gu_err;							\
})
#define get_user(x, ptr) __get_user(x, ptr)

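/*
 * Hedged usage example (illustrative names, not from this file): reading a
 * value passed in by user space; get_user() returns 0 on success or -EFAULT,
 * and the variable is zeroed if the access faults:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */
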
unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);

#define __suffix0
#define __suffix1 b
#define __suffix2 w
#define __suffix4 l

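/*
 * __suffixN maps a chunk size to the move-instruction size suffix (1 -> b,
 * 2 -> w, 4 -> l, 0 -> none), so a constant copy broken up as 4+2+1 bytes
 * ends up using an l, a w and a b sized move in the macro below.
 */
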
#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	asm volatile ("\n"						\
		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
		"	move."#s1"	%3,(%1)+\n"			\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
		"	move."#s2"	%3,(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
		"	move."#s3"	%3,(%1)+\n"			\
		"	.endif\n"					\
		"	.endif\n"					\
		"4:\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10f\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"	.long	2b,20f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	3b,30f\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	addq.l #"#n1",%0\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"20:	addq.l #"#n2",%0\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"30:	addq.l #"#n3",%0\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,  \
					__suffix##n1, __suffix##n2, __suffix##n3)

static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
		break;
	case 2:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
		break;
	case 3:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
		break;
	case 4:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
		break;
	case 5:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
		break;
	case 6:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
		break;
	case 7:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
		break;
	case 8:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
		break;
	case 9:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
		break;
	case 10:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
		break;
	case 12:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
		break;
	default:
		/* we limit the inlined version to 3 moves */
		return __generic_copy_from_user(to, from, n);
	}

	return res;
}

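/*
 * Worked example for the table above: a constant 7-byte copy uses the
 * (4, 2, 1) decomposition, i.e. one long, one word and one byte move.  If
 * one of the user reads faults, its fixup falls through the remaining
 * addq.l entries, so res ends up holding the number of bytes that were not
 * copied - exactly what raw_copy_from_user() is expected to return.
 */
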
#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
	asm volatile ("\n"						\
		"	move."#s1"	(%2)+,%3\n"			\
		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
		"12:	move."#s2"	(%2)+,%3\n"			\
		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
		"22:\n"							\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	move."#s3"	(%2)+,%3\n"			\
		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
		"32:\n"							\
		"	.endif\n"					\
		"4:\n"							\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	11b,5f\n"				\
		"	.long	12b,5f\n"				\
		"	.long	21b,5f\n"				\
		"	.long	22b,5f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	31b,5f\n"				\
		"	.long	32b,5f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"5:	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

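/*
 * Unlike the from-user helper, a fault in any of the user-space stores above
 * jumps to label 5, which simply reports the whole count n as not copied.
 */
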
static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
				b, d, 1);
		break;
	case 2:
		__put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
				w, r, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
				l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* limit the inlined version to 3 moves */
		return __generic_copy_to_user(to, from, n);
	}

	return res;
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_from_user(to, from, n);
	return __generic_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_to_user(to, from, n);
	return __generic_copy_to_user(to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

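/*
 * raw_copy_{from,to}_user() back the generic copy_from_user()/copy_to_user()
 * wrappers in <linux/uaccess.h>, which return the number of bytes left
 * uncopied.  Hedged usage sketch (names are illustrative, not from this
 * file):
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uptr, sizeof(karg)))
 *		return -EFAULT;
 */
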
#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type *__gk_dst = (type *)(dst);					\
	type *__gk_src = (type *)(src);					\
	int __gk_err = 0;						\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src,	\
				u8, b, d, -EFAULT);			\
		break;							\
	case 2:								\
		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src,	\
				u16, w, r, -EFAULT);			\
		break;							\
	case 4:								\
		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src,	\
				u32, l, r, -EFAULT);			\
		break;							\
	case 8:								\
		__get_user_asm8("move", __gk_err, *__gk_dst, __gk_src);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type __pk_src = *(type *)(src);					\
	type *__pk_dst = (type *)(dst);					\
	int __pk_err = 0;						\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__put_user_asm("move", __pk_err, __pk_src, __pk_dst,	\
				b, d, -EFAULT);				\
		break;							\
	case 2:								\
		__put_user_asm("move", __pk_err, __pk_src, __pk_dst,	\
				w, r, -EFAULT);				\
		break;							\
	case 4:								\
		__put_user_asm("move", __pk_err, __pk_src, __pk_dst,	\
				l, r, -EFAULT);				\
		break;							\
	case 8:								\
		__put_user_asm8("move", __pk_err, __pk_src, __pk_dst);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)

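/*
 * The two *_kernel_nofault() helpers above use a plain "move" (never
 * "moves") because both addresses are kernel pointers.  They feed the
 * generic get_kernel_nofault()/copy_from_kernel_nofault() interfaces; a
 * hedged usage sketch (illustrative only):
 *
 *	unsigned long insn;
 *
 *	if (get_kernel_nofault(insn, (unsigned long *)addr))
 *		return -EFAULT;
 */
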
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __clear_user(void __user *to, unsigned long n);

#define clear_user	__clear_user

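/*
 * Like the copy helpers, __clear_user() returns the number of bytes that
 * could not be zeroed (0 on complete success).
 */
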
#else /* !CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif

#endif /* __M68K_UACCESS_H */