/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_fs()	(current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On return to user mode, check that addr_limit (fs) is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)
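
/*
 * Usage sketch (illustrative only, not part of this header): the
 * traditional pattern for temporarily widening the address limit so
 * that routines performing user accesses can operate on a kernel
 * buffer.  The callee name below is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = routine_that_does_user_accesses(kernel_buf, len);
 *	set_fs(old_fs);
 */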

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
			mm_segment_t seg)
{
	if (addr > seg.seg)
		return 0;
	return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif

#define access_ok(addr, size)		\
	(__chk_user_ptr(addr),		\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
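
/*
 * Usage sketch (illustrative, hypothetical names): validate a user
 * pointer once with access_ok(), then use the unchecked __get_user()
 * variants for the individual accesses, as described in the comment
 * block below.
 *
 *	u32 __user *uptr;
 *	u32 lo, hi;
 *	int err;
 *
 *	if (!access_ok(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	err  = __get_user(lo, &uptr[0]);
 *	err |= __get_user(hi, &uptr[1]);
 *	if (err)
 *		return -EFAULT;
 */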

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __put_user_goto(x, ptr, label) \
	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
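
/*
 * Usage sketch (illustrative only; demo_ioctl and its argument layout
 * are hypothetical): single-value transfers with the checked variants,
 * which perform the access_ok() test themselves.
 *
 *	static long demo_ioctl(struct file *file, unsigned int cmd,
 *			       unsigned long arg)
 *	{
 *		u32 __user *uarg = (u32 __user *)arg;
 *		u32 val;
 *
 *		if (get_user(val, uarg))
 *			return -EFAULT;
 *		val |= 0x1;
 *		if (put_user(val, uarg))
 *			return -EFAULT;
 *		return 0;
 *	}
 */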

#ifdef CONFIG_PPC64

#define ___get_user_instr(gu_op, dest, ptr)				\
({									\
	long __gui_ret = 0;						\
	unsigned long __gui_ptr = (unsigned long)ptr;			\
	struct ppc_inst __gui_inst;					\
	unsigned int __prefix, __suffix;				\
	__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr);	\
	if (__gui_ret == 0) {						\
		if ((__prefix >> 26) == OP_PREFIX) {			\
			__gui_ret = gu_op(__suffix,			\
				(unsigned int __user *)__gui_ptr + 1);	\
			__gui_inst = ppc_inst_prefix(__prefix,		\
						     __suffix);		\
		} else {						\
			__gui_inst = ppc_inst(__prefix);		\
		}							\
		if (__gui_ret == 0)					\
			(dest) = __gui_inst;				\
	}								\
	__gui_ret;							\
})

#define get_user_instr(x, ptr) \
	___get_user_instr(get_user, x, ptr)

#define __get_user_instr(x, ptr) \
	___get_user_instr(__get_user, x, ptr)

#define __get_user_instr_inatomic(x, ptr) \
	___get_user_instr(__get_user_inatomic, x, ptr)

#else /* !CONFIG_PPC64 */
#define get_user_instr(x, ptr) \
	get_user((x).val, (u32 __user *)(ptr))

#define __get_user_instr(x, ptr) \
	__get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)

#define __get_user_instr_inatomic(x, ptr) \
	__get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))

#endif /* CONFIG_PPC64 */
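
/*
 * Usage sketch (illustrative; the regs variable is hypothetical context):
 * fetching an instruction from a user program counter.  On CONFIG_PPC64
 * the suffix word of a prefixed instruction is read transparently.
 *
 *	struct ppc_inst insn;
 *
 *	if (get_user_instr(insn, (void __user *)regs->nip))
 *		return -EFAULT;
 */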

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	  __put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
								\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(__pu_addr, __pu_size))				\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
									\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
								\
	__pu_err;						\
})


#define __put_user_asm_goto(x, addr, label, op)			\
	asm volatile goto(					\
		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)			\
	asm volatile goto(					\
		"1:	stw%X1 %0, %1\n"			\
		"2:	stw%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label)		\
do {								\
	switch (size) {						\
	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck_goto(x, ptr, size, label)		\
do {								\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(ptr);					\
	__put_user_size_goto((x), __pu_addr, (size), label);	\
} while (0)


extern long __get_user_bad(void);

/*
 * This does an atomic 128 byte aligned load from userspace.
 * It is up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)		\
	__asm__ __volatile__(				\
		"1:	lvx  0,0,%1	# get user\n"	\
		" 	stvx 0,0,%2	# put kernel\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)			\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval);  break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
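
/*
 * Worked example (a note, not code used by the kernel): for a u64 user
 * pointer p, __long_type(*p) is unsigned long long on 32-bit, since
 * sizeof(u64) is greater than sizeof(0UL), and unsigned long on 64-bit,
 * so the temporary in __get_user_nocheck() below is always wide enough.
 */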

#define __get_user_nocheck(x, ptr, size, do_allow)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	barrier_nospec();					\
	if (do_allow)								\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	else									\
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	__long_type(*(ptr)) __gu_val = 0;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(size) __gu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(__gu_addr, __gu_size)) {				\
		barrier_nospec();					\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
									\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;
	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}
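
/*
 * The raw_copy_*() routines above are what the generic copy_from_user()
 * and copy_to_user() wrappers in <linux/uaccess.h> call into.  Usage
 * sketch (illustrative, hypothetical names):
 *
 *	struct demo_args kargs;
 *
 *	if (copy_from_user(&kargs, uargs, sizeof(kargs)))
 *		return -EFAULT;
 */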

static __always_inline unsigned long __must_check
copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = memcpy_mcsafe((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;
	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}
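
/*
 * Usage sketch for clear_user() (illustrative, hypothetical names):
 * zero-fill the tail of a user buffer after a partial copy.  A non-zero
 * return value is the number of bytes that could not be cleared.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */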

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access

static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_read_from_user(ptr, len);
	return true;
}
#define user_read_access_begin	user_read_access_begin
#define user_read_access_end		prevent_current_read_from_user

static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_write_to_user((void __user *)ptr, len);
	return true;
}
#define user_write_access_begin	user_write_access_begin
#define user_write_access_end		prevent_current_write_to_user

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)

#define unsafe_copy_to_user(d, s, l, e) \
do {									\
	u8 __user *_dst = (u8 __user *)(d);				\
	const u8 *_src = (const u8 *)(s);				\
	size_t _len = (l);						\
	int _i;								\
									\
	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long))		\
		__put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {			\
		__put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e);	\
		_i += 4;						\
	}								\
	if (_len & 2) {							\
		__put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e);	\
		_i += 2;						\
	}								\
	if (_len & 1) \
		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
} while (0)
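
/*
 * Usage sketch for the unsafe_*() accessors (illustrative, hypothetical
 * names): open a user access window once, perform several stores with
 * the cheap goto-based variants, and close the window on both the
 * success and failure paths.
 *
 *	if (!user_write_access_begin(frame, sizeof(*frame)))
 *		return -EFAULT;
 *	unsafe_put_user(val0, &frame->f0, efault);
 *	unsafe_put_user(val1, &frame->f1, efault);
 *	user_write_access_end();
 *	return 0;
 * efault:
 *	user_write_access_end();
 *	return -EFAULT;
 */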

#endif	/* _ARCH_POWERPC_UACCESS_H */