1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2b8b572e1SStephen Rothwell #ifndef _ARCH_POWERPC_UACCESS_H
3b8b572e1SStephen Rothwell #define _ARCH_POWERPC_UACCESS_H
4b8b572e1SStephen Rothwell
5b8b572e1SStephen Rothwell #include <asm/processor.h>
6b8b572e1SStephen Rothwell #include <asm/page.h>
7527b5baeSAl Viro #include <asm/extable.h>
8de78a9c4SChristophe Leroy #include <asm/kup.h>
9*ae619de5SMichael Ellerman #include <asm/asm-compat.h>
10b8b572e1SStephen Rothwell
11b8b572e1SStephen Rothwell #ifdef __powerpc64__
12b8b572e1SStephen Rothwell /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
135ae4998bSChristoph Hellwig #define TASK_SIZE_MAX TASK_SIZE_USER64
14b8b572e1SStephen Rothwell #endif
15b8b572e1SStephen Rothwell
1612700c17SArnd Bergmann #include <asm-generic/access_ok.h>
17b8b572e1SStephen Rothwell
18b8b572e1SStephen Rothwell /*
19b8b572e1SStephen Rothwell * These are the main single-value transfer routines. They automatically
20b8b572e1SStephen Rothwell * use the right size if we just have the right pointer type.
21b8b572e1SStephen Rothwell *
22b8b572e1SStephen Rothwell * This gets kind of ugly. We want to return _two_ values in "get_user()"
23b8b572e1SStephen Rothwell * and yet we don't want to do any pointers, because that is too much
24b8b572e1SStephen Rothwell * of a performance impact. Thus we have a few rather ugly macros here,
25b8b572e1SStephen Rothwell * and hide all the ugliness from the user.
26b8b572e1SStephen Rothwell *
27b8b572e1SStephen Rothwell * The "__xxx" versions of the user access functions are versions that
28b8b572e1SStephen Rothwell * do not verify the address space, that must have been done previously
29b8b572e1SStephen Rothwell * with a separate "access_ok()" call (this is used when we do multiple
30b8b572e1SStephen Rothwell * accesses to the same area of user memory).
31b8b572e1SStephen Rothwell *
32b8b572e1SStephen Rothwell * As we use the same address space for kernel and user data on the
33b8b572e1SStephen Rothwell * PowerPC, we can just do these as direct assignments. (Of course, the
34b8b572e1SStephen Rothwell * exception handling means that it's no longer "just"...)
35b8b572e1SStephen Rothwell *
36b8b572e1SStephen Rothwell */
/*
 * __put_user(x, ptr): store a simple value at user address ptr WITHOUT
 * an access_ok() check (the caller must have validated the pointer).
 * Evaluates to 0 on success or -EFAULT if the store faults.
 * A KUAP write window is opened with allow_write_to_user() and is
 * closed on BOTH the success path and the __pu_failed fault path —
 * the pairing and ordering here are deliberate; do not reorder.
 */
3717f8c0bcSChristophe Leroy #define __put_user(x, ptr) \
38b8b572e1SStephen Rothwell ({ \
39b8b572e1SStephen Rothwell long __pu_err; \
40b8b572e1SStephen Rothwell __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
4117f8c0bcSChristophe Leroy __typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x); \
4217f8c0bcSChristophe Leroy __typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr)); \
43d02f6b7dSNicholas Piggin \
441af1717dSMichael S. Tsirkin might_fault(); \
45e72fcdb2SChristophe Leroy do { \
46e72fcdb2SChristophe Leroy __label__ __pu_failed; \
47e72fcdb2SChristophe Leroy \
48e72fcdb2SChristophe Leroy allow_write_to_user(__pu_addr, __pu_size); \
49e72fcdb2SChristophe Leroy __put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \
50e72fcdb2SChristophe Leroy prevent_write_to_user(__pu_addr, __pu_size); \
51e72fcdb2SChristophe Leroy __pu_err = 0; \
52e72fcdb2SChristophe Leroy break; \
53e72fcdb2SChristophe Leroy \
54e72fcdb2SChristophe Leroy __pu_failed: \
55e72fcdb2SChristophe Leroy prevent_write_to_user(__pu_addr, __pu_size); \
56e72fcdb2SChristophe Leroy __pu_err = -EFAULT; \
57e72fcdb2SChristophe Leroy } while (0); \
58d02f6b7dSNicholas Piggin \
59b8b572e1SStephen Rothwell __pu_err; \
60b8b572e1SStephen Rothwell })
61b8b572e1SStephen Rothwell
/*
 * put_user(x, ptr): like __put_user() but validates the user pointer
 * first.  Evaluates ptr exactly once; yields 0 on success, -EFAULT on
 * a bad address or a faulting store.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *_pu_addr = (ptr);			\
	long __pu_ret = -EFAULT;					\
									\
	if (access_ok(_pu_addr, sizeof(*(ptr))))			\
		__pu_ret = __put_user(x, _pu_addr);			\
	__pu_ret;							\
})
69b8b572e1SStephen Rothwell
707fdf966bSChristophe Leroy /*
717fdf966bSChristophe Leroy * We don't tell gcc that we are accessing memory, but this is OK
727fdf966bSChristophe Leroy * because we do not write to any memory gcc knows about, so there
737fdf966bSChristophe Leroy * are no aliasing issues.
747fdf966bSChristophe Leroy */
/*
 * Single-instruction user store helpers.  Each expands to an asm goto
 * that stores one item and branches to 'label' if the store faults
 * (via the EX_TABLE extable entry); there is no output operand, so
 * the success path produces no error code.
 */
75dc5dac74SNicholas Piggin /* -mprefixed can generate offsets beyond range, fall back hack */
76dc5dac74SNicholas Piggin #ifdef CONFIG_PPC_KERNEL_PREFIXED
77dc5dac74SNicholas Piggin #define __put_user_asm_goto(x, addr, label, op) \
78aaff74d8SLinus Torvalds asm goto( \
79dc5dac74SNicholas Piggin "1: " op " %0,0(%1) # put_user\n" \
80dc5dac74SNicholas Piggin EX_TABLE(1b, %l2) \
81dc5dac74SNicholas Piggin : \
82dc5dac74SNicholas Piggin : "r" (x), "b" (addr) \
83dc5dac74SNicholas Piggin : \
84dc5dac74SNicholas Piggin : label)
85dc5dac74SNicholas Piggin #else
86334710b1SChristophe Leroy #define __put_user_asm_goto(x, addr, label, op) \
87aaff74d8SLinus Torvalds asm goto( \
88334710b1SChristophe Leroy "1: " op "%U1%X1 %0,%1 # put_user\n" \
89334710b1SChristophe Leroy EX_TABLE(1b, %l2) \
90334710b1SChristophe Leroy : \
912a24d80fSNick Desaulniers : "r" (x), "m<>" (*addr) \
92334710b1SChristophe Leroy : \
93334710b1SChristophe Leroy : label)
94dc5dac74SNicholas Piggin #endif
95334710b1SChristophe Leroy
/*
 * 64-bit store: a single 'std' on ppc64 (with a DS-form memory
 * constraint when not building prefixed), or a pair of 'stw' on ppc32
 * where BOTH word stores get their own extable entry.
 */
96334710b1SChristophe Leroy #ifdef __powerpc64__
97af4cff0dSMichael Ellerman #ifdef CONFIG_PPC_KERNEL_PREFIXED
98334710b1SChristophe Leroy #define __put_user_asm2_goto(x, ptr, label) \
99334710b1SChristophe Leroy __put_user_asm_goto(x, ptr, label, "std")
100af4cff0dSMichael Ellerman #else
101af4cff0dSMichael Ellerman #define __put_user_asm2_goto(x, addr, label) \
102af4cff0dSMichael Ellerman asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
103af4cff0dSMichael Ellerman EX_TABLE(1b, %l2) \
104af4cff0dSMichael Ellerman : \
105af4cff0dSMichael Ellerman : "r" (x), DS_FORM_CONSTRAINT (*addr) \
106af4cff0dSMichael Ellerman : \
107af4cff0dSMichael Ellerman : label)
108af4cff0dSMichael Ellerman #endif // CONFIG_PPC_KERNEL_PREFIXED
109334710b1SChristophe Leroy #else /* __powerpc64__ */
110334710b1SChristophe Leroy #define __put_user_asm2_goto(x, addr, label) \
111aaff74d8SLinus Torvalds asm goto( \
112334710b1SChristophe Leroy "1: stw%X1 %0, %1\n" \
113334710b1SChristophe Leroy "2: stw%X1 %L0, %L1\n" \
114334710b1SChristophe Leroy EX_TABLE(1b, %l2) \
115334710b1SChristophe Leroy EX_TABLE(2b, %l2) \
116334710b1SChristophe Leroy : \
117334710b1SChristophe Leroy : "r" (x), "m" (*addr) \
118334710b1SChristophe Leroy : \
119334710b1SChristophe Leroy : label)
120334710b1SChristophe Leroy #endif /* __powerpc64__ */
121334710b1SChristophe Leroy
/*
 * Dispatch on the compile-time access size; any size other than
 * 1/2/4/8 is a build error via BUILD_BUG().
 */
122334710b1SChristophe Leroy #define __put_user_size_goto(x, ptr, size, label) \
123334710b1SChristophe Leroy do { \
124be15a165SChristophe Leroy __typeof__(*(ptr)) __user *__pus_addr = (ptr); \
125be15a165SChristophe Leroy \
126334710b1SChristophe Leroy switch (size) { \
127be15a165SChristophe Leroy case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \
128be15a165SChristophe Leroy case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \
129be15a165SChristophe Leroy case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \
130be15a165SChristophe Leroy case 8: __put_user_asm2_goto(x, __pus_addr, label); break; \
1319975f852SChristophe Leroy default: BUILD_BUG(); \
132334710b1SChristophe Leroy } \
133334710b1SChristophe Leroy } while (0)
134334710b1SChristophe Leroy
1355080332cSMichael Neuling /*
1365080332cSMichael Neuling * This does an atomic 128 byte aligned load from userspace.
1375080332cSMichael Neuling * Upto caller to do enable_kernel_vmx() before calling!
1385080332cSMichael Neuling */
/*
 * NOTE(review): lvx/stvx move 16 bytes (128 bits), so "128 byte" above
 * presumably means 128-bit / 16-byte aligned — confirm and fix the
 * wording ("Upto" should also be "Up to").
 * On a faulting load, the .fixup stub at 3: sets err = %3 (-EFAULT)
 * and resumes at 2:; on success err is passed through unchanged
 * (input/output tied via "0" (err)).
 */
1395080332cSMichael Neuling #define __get_user_atomic_128_aligned(kaddr, uaddr, err) \
1405080332cSMichael Neuling __asm__ __volatile__( \
1418667d0d6SAnders Roxell ".machine push\n" \
1428667d0d6SAnders Roxell ".machine altivec\n" \
1435080332cSMichael Neuling "1: lvx 0,0,%1 # get user\n" \
1445080332cSMichael Neuling " stvx 0,0,%2 # put kernel\n" \
1458667d0d6SAnders Roxell ".machine pop\n" \
1465080332cSMichael Neuling "2:\n" \
1475080332cSMichael Neuling ".section .fixup,\"ax\"\n" \
1485080332cSMichael Neuling "3: li %0,%3\n" \
1495080332cSMichael Neuling " b 2b\n" \
1505080332cSMichael Neuling ".previous\n" \
1515080332cSMichael Neuling EX_TABLE(1b, 3b) \
1525080332cSMichael Neuling : "=r" (err) \
1535080332cSMichael Neuling : "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
1545080332cSMichael Neuling
1555cd29b1fSChristophe Leroy #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
1565cd29b1fSChristophe Leroy
/*
 * get_user helpers for compilers supporting asm goto WITH output
 * operands: the loaded value is an output and a fault branches
 * straight to 'label', so no error code needs threading through.
 */
157dc5dac74SNicholas Piggin /* -mprefixed can generate offsets beyond range, fall back hack */
158dc5dac74SNicholas Piggin #ifdef CONFIG_PPC_KERNEL_PREFIXED
159dc5dac74SNicholas Piggin #define __get_user_asm_goto(x, addr, label, op) \
160aaff74d8SLinus Torvalds asm_goto_output( \
161dc5dac74SNicholas Piggin "1: "op" %0,0(%1) # get_user\n" \
162dc5dac74SNicholas Piggin EX_TABLE(1b, %l2) \
163dc5dac74SNicholas Piggin : "=r" (x) \
164dc5dac74SNicholas Piggin : "b" (addr) \
165dc5dac74SNicholas Piggin : \
166dc5dac74SNicholas Piggin : label)
167dc5dac74SNicholas Piggin #else
1685cd29b1fSChristophe Leroy #define __get_user_asm_goto(x, addr, label, op) \
169aaff74d8SLinus Torvalds asm_goto_output( \
1705cd29b1fSChristophe Leroy "1: "op"%U1%X1 %0, %1 # get_user\n" \
1715cd29b1fSChristophe Leroy EX_TABLE(1b, %l2) \
1725cd29b1fSChristophe Leroy : "=r" (x) \
1732a24d80fSNick Desaulniers : "m<>" (*addr) \
1745cd29b1fSChristophe Leroy : \
1755cd29b1fSChristophe Leroy : label)
176dc5dac74SNicholas Piggin #endif
1775cd29b1fSChristophe Leroy
/*
 * 64-bit load: single 'ld' on ppc64; lwz pair on ppc32.  The ppc32
 * output uses "=&r" (early-clobber) so the first loaded word cannot
 * clobber a register still needed to address the second.
 */
1785cd29b1fSChristophe Leroy #ifdef __powerpc64__
1795cd29b1fSChristophe Leroy #define __get_user_asm2_goto(x, addr, label) \
1805cd29b1fSChristophe Leroy __get_user_asm_goto(x, addr, label, "ld")
1815cd29b1fSChristophe Leroy #else /* __powerpc64__ */
1825cd29b1fSChristophe Leroy #define __get_user_asm2_goto(x, addr, label) \
183aaff74d8SLinus Torvalds asm_goto_output( \
1845cd29b1fSChristophe Leroy "1: lwz%X1 %0, %1\n" \
1855cd29b1fSChristophe Leroy "2: lwz%X1 %L0, %L1\n" \
1865cd29b1fSChristophe Leroy EX_TABLE(1b, %l2) \
1875cd29b1fSChristophe Leroy EX_TABLE(2b, %l2) \
1887315e457SChristophe Leroy : "=&r" (x) \
1895cd29b1fSChristophe Leroy : "m" (*addr) \
1905cd29b1fSChristophe Leroy : \
1915cd29b1fSChristophe Leroy : label)
1925cd29b1fSChristophe Leroy #endif /* __powerpc64__ */
1935cd29b1fSChristophe Leroy
/* Size dispatch; the destination must be at least 'size' bytes wide. */
1945cd29b1fSChristophe Leroy #define __get_user_size_goto(x, ptr, size, label) \
1955cd29b1fSChristophe Leroy do { \
1965cd29b1fSChristophe Leroy BUILD_BUG_ON(size > sizeof(x)); \
1975cd29b1fSChristophe Leroy switch (size) { \
1985cd29b1fSChristophe Leroy case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break; \
1995cd29b1fSChristophe Leroy case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
2005cd29b1fSChristophe Leroy case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
2015cd29b1fSChristophe Leroy case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break; \
2025cd29b1fSChristophe Leroy default: x = 0; BUILD_BUG(); \
2035cd29b1fSChristophe Leroy } \
2045cd29b1fSChristophe Leroy } while (0)
2055cd29b1fSChristophe Leroy
/*
 * Error-code flavour layered over the goto flavour: retval is 0 on
 * success, and on a fault x is zeroed and retval set to -EFAULT.
 */
2065cd29b1fSChristophe Leroy #define __get_user_size_allowed(x, ptr, size, retval) \
2075cd29b1fSChristophe Leroy do { \
2085cd29b1fSChristophe Leroy __label__ __gus_failed; \
2095cd29b1fSChristophe Leroy \
2105cd29b1fSChristophe Leroy __get_user_size_goto(x, ptr, size, __gus_failed); \
2115cd29b1fSChristophe Leroy retval = 0; \
2125cd29b1fSChristophe Leroy break; \
2135cd29b1fSChristophe Leroy __gus_failed: \
2145cd29b1fSChristophe Leroy x = 0; \
2155cd29b1fSChristophe Leroy retval = -EFAULT; \
2165cd29b1fSChristophe Leroy } while (0)
2175cd29b1fSChristophe Leroy
2185cd29b1fSChristophe Leroy #else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
2195cd29b1fSChristophe Leroy
/*
 * Fallback get_user helpers for compilers WITHOUT asm goto output
 * support: the error code travels through 'err' and the .fixup stub
 * zeroes the destination register(s) on a fault before resuming.
 */
220b8b572e1SStephen Rothwell #define __get_user_asm(x, addr, err, op) \
221b8b572e1SStephen Rothwell __asm__ __volatile__( \
2222f279eebSChristophe Leroy "1: "op"%U2%X2 %1, %2 # get_user\n" \
223b8b572e1SStephen Rothwell "2:\n" \
224b8b572e1SStephen Rothwell ".section .fixup,\"ax\"\n" \
225b8b572e1SStephen Rothwell "3: li %0,%3\n" \
226b8b572e1SStephen Rothwell " li %1,0\n" \
227b8b572e1SStephen Rothwell " b 2b\n" \
228b8b572e1SStephen Rothwell ".previous\n" \
22924bfa6a9SNicholas Piggin EX_TABLE(1b, 3b) \
230b8b572e1SStephen Rothwell : "=r" (err), "=r" (x) \
2312a24d80fSNick Desaulniers : "m<>" (*addr), "i" (-EFAULT), "0" (err))
232b8b572e1SStephen Rothwell
/*
 * 64-bit fallback: 'ld' on ppc64, lwz pair on ppc32.  The ppc32 fixup
 * zeroes BOTH halves (%1 and %1+1) and both loads have extable cover;
 * "=&r" keeps the first half from clobbering the address register.
 */
233b8b572e1SStephen Rothwell #ifdef __powerpc64__
234b8b572e1SStephen Rothwell #define __get_user_asm2(x, addr, err) \
235b8b572e1SStephen Rothwell __get_user_asm(x, addr, err, "ld")
236b8b572e1SStephen Rothwell #else /* __powerpc64__ */
237b8b572e1SStephen Rothwell #define __get_user_asm2(x, addr, err) \
238b8b572e1SStephen Rothwell __asm__ __volatile__( \
239c20beffeSChristophe Leroy "1: lwz%X2 %1, %2\n" \
240c20beffeSChristophe Leroy "2: lwz%X2 %L1, %L2\n" \
241b8b572e1SStephen Rothwell "3:\n" \
242b8b572e1SStephen Rothwell ".section .fixup,\"ax\"\n" \
243b8b572e1SStephen Rothwell "4: li %0,%3\n" \
244b8b572e1SStephen Rothwell " li %1,0\n" \
245b8b572e1SStephen Rothwell " li %1+1,0\n" \
246b8b572e1SStephen Rothwell " b 3b\n" \
247b8b572e1SStephen Rothwell ".previous\n" \
24824bfa6a9SNicholas Piggin EX_TABLE(1b, 4b) \
24924bfa6a9SNicholas Piggin EX_TABLE(2b, 4b) \
250b8b572e1SStephen Rothwell : "=r" (err), "=&r" (x) \
251c20beffeSChristophe Leroy : "m" (*addr), "i" (-EFAULT), "0" (err))
252b8b572e1SStephen Rothwell #endif /* __powerpc64__ */
253b8b572e1SStephen Rothwell
/* Size dispatch, error code in 'retval' (0 or -EFAULT). */
2545cd62333SChristophe Leroy #define __get_user_size_allowed(x, ptr, size, retval) \
255b8b572e1SStephen Rothwell do { \
256b8b572e1SStephen Rothwell retval = 0; \
2579975f852SChristophe Leroy BUILD_BUG_ON(size > sizeof(x)); \
258b8b572e1SStephen Rothwell switch (size) { \
259c20beffeSChristophe Leroy case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \
260c20beffeSChristophe Leroy case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \
261c20beffeSChristophe Leroy case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
262c20beffeSChristophe Leroy case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \
263f9cd5f91SNathan Chancellor default: x = 0; BUILD_BUG(); \
264b8b572e1SStephen Rothwell } \
2655cd62333SChristophe Leroy } while (0)
2665cd62333SChristophe Leroy
/* goto flavour synthesized from the error-code flavour above. */
267035785abSChristophe Leroy #define __get_user_size_goto(x, ptr, size, label) \
268035785abSChristophe Leroy do { \
269035785abSChristophe Leroy long __gus_retval; \
270035785abSChristophe Leroy \
271035785abSChristophe Leroy __get_user_size_allowed(x, ptr, size, __gus_retval); \
272035785abSChristophe Leroy if (__gus_retval) \
273035785abSChristophe Leroy goto label; \
274035785abSChristophe Leroy } while (0)
275035785abSChristophe Leroy
2765cd29b1fSChristophe Leroy #endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
2775cd29b1fSChristophe Leroy
278f7a6947cSMichael Ellerman /*
279f7a6947cSMichael Ellerman * This is a type: either unsigned long, if the argument fits into
280f7a6947cSMichael Ellerman * that type, or otherwise unsigned long long.
281f7a6947cSMichael Ellerman */
/*
 * e.g. on 32-bit, __long_type(*(u64 *)p) is unsigned long long while
 * __long_type(*(u32 *)p) is unsigned long — so one temporary can hold
 * any access size without truncation.
 */
282f7a6947cSMichael Ellerman #define __long_type(x) \
283f7a6947cSMichael Ellerman __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
284f7a6947cSMichael Ellerman
/*
 * __get_user(x, ptr): load *ptr into x WITHOUT an access_ok() check.
 * Evaluates to 0 on success or -EFAULT on a faulting load (x is then
 * zero, via __get_user_size_allowed).  The KUAP read window opened by
 * allow_read_from_user() brackets exactly the access — do not reorder.
 */
28517f8c0bcSChristophe Leroy #define __get_user(x, ptr) \
286b8b572e1SStephen Rothwell ({ \
287b8b572e1SStephen Rothwell long __gu_err; \
288f7a6947cSMichael Ellerman __long_type(*(ptr)) __gu_val; \
289e00d93acSAnton Blanchard __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
29017f8c0bcSChristophe Leroy __typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr)); \
291d02f6b7dSNicholas Piggin \
2921af1717dSMichael S. Tsirkin might_fault(); \
293e72fcdb2SChristophe Leroy allow_read_from_user(__gu_addr, __gu_size); \
294e72fcdb2SChristophe Leroy __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
295e72fcdb2SChristophe Leroy prevent_read_from_user(__gu_addr, __gu_size); \
296b8b572e1SStephen Rothwell (x) = (__typeof__(*(ptr)))__gu_val; \
297d02f6b7dSNicholas Piggin \
298b8b572e1SStephen Rothwell __gu_err; \
299b8b572e1SStephen Rothwell })
300b8b572e1SStephen Rothwell
/*
 * get_user(x, ptr): validated load from user space.  Evaluates ptr
 * exactly once.  Yields 0 on success; on a bad address or faulting
 * load x is set to 0 and the result is -EFAULT.
 */
#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *_gu_addr = (ptr);			\
	long __gu_ret;							\
									\
	if (access_ok(_gu_addr, sizeof(*(ptr)))) {			\
		__gu_ret = __get_user(x, _gu_addr);			\
	} else {							\
		(x) = (__force __typeof__(*(ptr)))0;			\
		__gu_ret = -EFAULT;					\
	}								\
	__gu_ret;							\
})
309b8b572e1SStephen Rothwell
310b8b572e1SStephen Rothwell /* more complex routines */
311b8b572e1SStephen Rothwell
312b8b572e1SStephen Rothwell extern unsigned long __copy_tofrom_user(void __user *to,
313b8b572e1SStephen Rothwell const void __user *from, unsigned long size);
314b8b572e1SStephen Rothwell
315d6bd8194SMichael Ellerman #ifdef __powerpc64__
3163448890cSAl Viro static inline unsigned long
raw_copy_in_user(void __user * to,const void __user * from,unsigned long n)3173448890cSAl Viro raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
3183448890cSAl Viro {
319de78a9c4SChristophe Leroy unsigned long ret;
320de78a9c4SChristophe Leroy
3211d8f739bSChristophe Leroy allow_read_write_user(to, from, n);
322de78a9c4SChristophe Leroy ret = __copy_tofrom_user(to, from, n);
3231d8f739bSChristophe Leroy prevent_read_write_user(to, from, n);
324de78a9c4SChristophe Leroy return ret;
3253448890cSAl Viro }
326b8b572e1SStephen Rothwell #endif /* __powerpc64__ */
327b8b572e1SStephen Rothwell
/*
 * Copy n bytes from user memory into a kernel buffer, no access_ok()
 * check.  Returns the number of bytes NOT copied (0 on full success).
 */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	allow_read_from_user(from, n);
	left = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);

	return left;
}
338b8b572e1SStephen Rothwell
3395cd62333SChristophe Leroy static inline unsigned long
raw_copy_to_user(void __user * to,const void * from,unsigned long n)3405cd62333SChristophe Leroy raw_copy_to_user(void __user *to, const void *from, unsigned long n)
3415cd62333SChristophe Leroy {
3425cd62333SChristophe Leroy unsigned long ret;
3435cd62333SChristophe Leroy
344de78a9c4SChristophe Leroy allow_write_to_user(to, n);
345052f9d20SChristophe Leroy ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
346de78a9c4SChristophe Leroy prevent_write_to_user(to, n);
347de78a9c4SChristophe Leroy return ret;
348b8b572e1SStephen Rothwell }
349b8b572e1SStephen Rothwell
35061e3acd8SAndrew Donnellan unsigned long __arch_clear_user(void __user *addr, unsigned long size);
351b8b572e1SStephen Rothwell
/*
 * Zero 'size' bytes at user address 'addr', no access_ok() check.
 * Returns the number of bytes that could NOT be cleared (0 on success).
 */
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long left;

	might_fault();

	allow_write_to_user(addr, size);
	left = __arch_clear_user(addr, size);
	prevent_write_to_user(addr, size);

	return left;
}
362b8b572e1SStephen Rothwell
/*
 * Validated variant of __clear_user(): if the range fails access_ok()
 * nothing is cleared and the full size is returned.
 */
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	if (unlikely(!access_ok(addr, size)))
		return size;

	return __clear_user(addr, size);
}
36761e3acd8SAndrew Donnellan
3681629372cSPaul Mackerras extern long strncpy_from_user(char *dst, const char __user *src, long count);
3691629372cSPaul Mackerras extern __must_check long strnlen_user(const char __user *str, long n);
370b8b572e1SStephen Rothwell
3714b8cda58SChristophe Leroy #ifdef CONFIG_ARCH_HAS_COPY_MC
3724b8cda58SChristophe Leroy unsigned long __must_check
3734b8cda58SChristophe Leroy copy_mc_generic(void *to, const void *from, unsigned long size);
3744b8cda58SChristophe Leroy
/*
 * Machine-check-safe kernel-to-kernel copy: a thin wrapper around
 * copy_mc_generic().  Returns the number of bytes not copied.
 */
3754b8cda58SChristophe Leroy static inline unsigned long __must_check
copy_mc_to_kernel(void * to,const void * from,unsigned long size)3764b8cda58SChristophe Leroy copy_mc_to_kernel(void *to, const void *from, unsigned long size)
3774b8cda58SChristophe Leroy {
3784b8cda58SChristophe Leroy return copy_mc_generic(to, from, size);
3794b8cda58SChristophe Leroy }
3804b8cda58SChristophe Leroy #define copy_mc_to_kernel copy_mc_to_kernel
3814b8cda58SChristophe Leroy
3824b8cda58SChristophe Leroy static inline unsigned long __must_check
copy_mc_to_user(void __user * to,const void * from,unsigned long n)3834b8cda58SChristophe Leroy copy_mc_to_user(void __user *to, const void *from, unsigned long n)
3844b8cda58SChristophe Leroy {
3850e3c3b90SAl Viro if (check_copy_size(from, n, true)) {
3864b8cda58SChristophe Leroy if (access_ok(to, n)) {
3874b8cda58SChristophe Leroy allow_write_to_user(to, n);
3884b8cda58SChristophe Leroy n = copy_mc_generic((void *)to, from, n);
3894b8cda58SChristophe Leroy prevent_write_to_user(to, n);
3904b8cda58SChristophe Leroy }
3914b8cda58SChristophe Leroy }
3924b8cda58SChristophe Leroy
3934b8cda58SChristophe Leroy return n;
3944b8cda58SChristophe Leroy }
3954b8cda58SChristophe Leroy #endif
3964b8cda58SChristophe Leroy
3976c44741dSOliver O'Halloran extern long __copy_from_user_flushcache(void *dst, const void __user *src,
3986c44741dSOliver O'Halloran unsigned size);
3996c44741dSOliver O'Halloran
/*
 * Open a combined read+write user-access window for [ptr, ptr+len)
 * after validating the range.  Returns false — with no window opened —
 * if access_ok() fails; otherwise returns true and the caller must
 * close the window with user_access_end().  Note might_fault() is only
 * reached once the range has been validated.
 */
user_access_begin(const void __user * ptr,size_t len)400eb52f66fSChristophe Leroy static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
4015cd62333SChristophe Leroy {
4025cd62333SChristophe Leroy if (unlikely(!access_ok(ptr, len)))
4035cd62333SChristophe Leroy return false;
4047d506ca9SAlexey Kardashevskiy
4057d506ca9SAlexey Kardashevskiy might_fault();
4067d506ca9SAlexey Kardashevskiy
4075cd62333SChristophe Leroy allow_read_write_user((void __user *)ptr, ptr, len);
4085cd62333SChristophe Leroy return true;
4095cd62333SChristophe Leroy }
4105cd62333SChristophe Leroy #define user_access_begin user_access_begin
4115cd62333SChristophe Leroy #define user_access_end prevent_current_access_user
4123d7dfd63SChristophe Leroy #define user_access_save prevent_user_access_return
4133d7dfd63SChristophe Leroy #define user_access_restore restore_user_access
4145cd62333SChristophe Leroy
/*
 * Read-only variant: validate the range, then open a KUAP read window.
 * Returns false (no window) on a bad range; pair a true return with
 * user_read_access_end().
 */
415eb52f66fSChristophe Leroy static __must_check __always_inline bool
user_read_access_begin(const void __user * ptr,size_t len)4164fe5cda9SChristophe Leroy user_read_access_begin(const void __user *ptr, size_t len)
4174fe5cda9SChristophe Leroy {
4184fe5cda9SChristophe Leroy if (unlikely(!access_ok(ptr, len)))
4194fe5cda9SChristophe Leroy return false;
4207d506ca9SAlexey Kardashevskiy
4217d506ca9SAlexey Kardashevskiy might_fault();
4227d506ca9SAlexey Kardashevskiy
4234fe5cda9SChristophe Leroy allow_read_from_user(ptr, len);
4244fe5cda9SChristophe Leroy return true;
4254fe5cda9SChristophe Leroy }
4264fe5cda9SChristophe Leroy #define user_read_access_begin user_read_access_begin
4274fe5cda9SChristophe Leroy #define user_read_access_end prevent_current_read_from_user
4284fe5cda9SChristophe Leroy
/*
 * Write-only variant: validate the range, then open a KUAP write
 * window.  Pair a true return with user_write_access_end().
 */
429eb52f66fSChristophe Leroy static __must_check __always_inline bool
user_write_access_begin(const void __user * ptr,size_t len)4304fe5cda9SChristophe Leroy user_write_access_begin(const void __user *ptr, size_t len)
4314fe5cda9SChristophe Leroy {
4324fe5cda9SChristophe Leroy if (unlikely(!access_ok(ptr, len)))
4334fe5cda9SChristophe Leroy return false;
4347d506ca9SAlexey Kardashevskiy
4357d506ca9SAlexey Kardashevskiy might_fault();
4367d506ca9SAlexey Kardashevskiy
4374fe5cda9SChristophe Leroy allow_write_to_user((void __user *)ptr, len);
4384fe5cda9SChristophe Leroy return true;
4394fe5cda9SChristophe Leroy }
4404fe5cda9SChristophe Leroy #define user_write_access_begin user_write_access_begin
4414fe5cda9SChristophe Leroy #define user_write_access_end prevent_current_write_to_user
4424fe5cda9SChristophe Leroy
/*
 * unsafe_get_user/unsafe_put_user: single accesses for use inside an
 * open user_access_begin() window — no access_ok(), no KUAP handling.
 * On a fault they 'goto e'; the caller's error label must close the
 * window.  The temporary uses __long_type so an 8-byte access works
 * on 32-bit too.
 */
4438cdf748dSChristophe Leroy #define unsafe_get_user(x, p, e) do { \
444f904c22fSChristophe Leroy __long_type(*(p)) __gu_val; \
445f904c22fSChristophe Leroy __typeof__(*(p)) __user *__gu_addr = (p); \
446f904c22fSChristophe Leroy \
447035785abSChristophe Leroy __get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
448f904c22fSChristophe Leroy (x) = (__typeof__(*(p)))__gu_val; \
4498cdf748dSChristophe Leroy } while (0)
4508cdf748dSChristophe Leroy
451de4ffc65SMichael Ellerman #define unsafe_put_user(x, p, e) \
452be15a165SChristophe Leroy __put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
45317bc4336SChristophe Leroy
/*
 * Bulk copy from user for use inside an open user-access window:
 * copies in u64 chunks, then mops up a trailing 4-, 2- and 1-byte
 * remainder, branching to 'e' on any faulting access.  The l & 4 /
 * & 2 / & 1 tests rely on the u64 loop having consumed all whole
 * 8-byte chunks first.
 */
4549466c179SChristopher M. Riedl #define unsafe_copy_from_user(d, s, l, e) \
4559466c179SChristopher M. Riedl do { \
4569466c179SChristopher M. Riedl u8 *_dst = (u8 *)(d); \
4579466c179SChristopher M. Riedl const u8 __user *_src = (const u8 __user *)(s); \
4589466c179SChristopher M. Riedl size_t _len = (l); \
4599466c179SChristopher M. Riedl int _i; \
4609466c179SChristopher M. Riedl \
461c1cc1570SChristophe Leroy for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
462c1cc1570SChristophe Leroy unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
463c1cc1570SChristophe Leroy if (_len & 4) { \
4649466c179SChristopher M. Riedl unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
4659466c179SChristopher M. Riedl _i += 4; \
4669466c179SChristopher M. Riedl } \
4679466c179SChristopher M. Riedl if (_len & 2) { \
4689466c179SChristopher M. Riedl unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
4699466c179SChristopher M. Riedl _i += 2; \
4709466c179SChristopher M. Riedl } \
4719466c179SChristopher M. Riedl if (_len & 1) \
4729466c179SChristopher M. Riedl unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
4739466c179SChristopher M. Riedl } while (0)
4749466c179SChristopher M. Riedl
/*
 * Bulk copy to user, mirror of unsafe_copy_from_user(): u64 chunks
 * followed by 4/2/1-byte tail stores, 'goto e' on any fault.  Must be
 * called inside an open user-write window.
 */
4755cd62333SChristophe Leroy #define unsafe_copy_to_user(d, s, l, e) \
47617bc4336SChristophe Leroy do { \
47717bc4336SChristophe Leroy u8 __user *_dst = (u8 __user *)(d); \
47817bc4336SChristophe Leroy const u8 *_src = (const u8 *)(s); \
47917bc4336SChristophe Leroy size_t _len = (l); \
48017bc4336SChristophe Leroy int _i; \
48117bc4336SChristophe Leroy \
482c6adc835SChristophe Leroy for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
483c6adc835SChristophe Leroy unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
484c6adc835SChristophe Leroy if (_len & 4) { \
485de4ffc65SMichael Ellerman unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
48617bc4336SChristophe Leroy _i += 4; \
48717bc4336SChristophe Leroy } \
48817bc4336SChristophe Leroy if (_len & 2) { \
489de4ffc65SMichael Ellerman unsafe_put_user(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
49017bc4336SChristophe Leroy _i += 2; \
49117bc4336SChristophe Leroy } \
49217bc4336SChristophe Leroy if (_len & 1) \
493de4ffc65SMichael Ellerman unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
49417bc4336SChristophe Leroy } while (0)
4955cd62333SChristophe Leroy
/*
 * Non-faulting kernel-to-kernel accessors: reuse the user-access size
 * dispatchers on kernel addresses (hence the __force __user casts) and
 * 'goto err_label' instead of oopsing on a bad address.  No KUAP
 * window is involved — both sides are kernel memory.
 */
496c3316525SChristoph Hellwig #define __get_kernel_nofault(dst, src, type, err_label) \
497035785abSChristophe Leroy __get_user_size_goto(*((type *)(dst)), \
498035785abSChristophe Leroy (__force type __user *)(src), sizeof(type), err_label)
499c3316525SChristoph Hellwig
500c3316525SChristoph Hellwig #define __put_kernel_nofault(dst, src, type, err_label) \
501c3316525SChristoph Hellwig __put_user_size_goto(*((type *)(src)), \
502c3316525SChristoph Hellwig (__force type __user *)(dst), sizeof(type), err_label)
503c3316525SChristoph Hellwig
504b8b572e1SStephen Rothwell #endif /* _ARCH_POWERPC_UACCESS_H */
505