/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/mm_types.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>

#include <asm/processor.h>
#include <asm-generic/access_ok.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 */
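
/*
 * Concretely, the accessor macros below emit ASI-qualified instructions
 * such as "lduwa [%reg] %asi, %dest" and "stwa %src, [%reg] %asi"
 * rather than plain loads/stores, so the address is always translated
 * through the user's address space, never the kernel's.
 */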

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}
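
/*
 * Worked example: with limit = TASK_SIZE, a request of addr = limit - 8,
 * size = 16 fails because addr > limit - size.  The non-constant path
 * additionally catches wrap-around: if addr + size overflows to a small
 * value, (addr < size) is true, so a huge size cannot sneak past the
 * limit check.
 */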

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);\
})

void __retl_efault(void);

/* These should become the main single-value transfer routines.
 * They automatically use the right size if we just have the right
 * pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({						\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	__chk_user_ptr(ptr);						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({						\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	__chk_user_ptr(ptr);						\
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
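
/*
 * Typical use (an illustrative sketch only; example_set_val is
 * hypothetical):
 *
 *	static int example_set_val(u64 __user *uptr, u64 *kval)
 *	{
 *		u64 tmp;
 *
 *		if (get_user(tmp, uptr))
 *			return -EFAULT;
 *		*kval = tmp;
 *		return put_user(tmp + 1, uptr);
 *	}
 *
 * Both macros evaluate to 0 on success and -EFAULT on a faulting access.
 */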

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_kernel_nofault(dst, src, type, label)			\
do {									\
	type *addr = (type __force *)(dst);				\
	type data = *(type *)src;					\
	register int __pu_ret;						\
	switch (sizeof(type)) {						\
	case 1: __put_kernel_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_kernel_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_kernel_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_kernel_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	if (__pu_ret)							\
		goto label;						\
} while (0)
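
/*
 * Sketch of how the nofault write helper is used (example_poke is
 * hypothetical; the generic copy_to_kernel_nofault() is the real
 * consumer of this macro):
 *
 *	long example_poke(unsigned long *dst, unsigned long val)
 *	{
 *		__put_kernel_nofault(dst, &val, unsigned long, fault);
 *		return 0;
 *	fault:
 *		return -EFAULT;
 *	}
 */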

#define __put_kernel_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
	"/* Put kernel asm, inline. */\n"				\
	"1:\t" "st"#size " %1, [%2]\n\t"				\
	"clr %0\n"							\
	"2:\n\n\t"							\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align 4\n"							\
	"3:\n\t"							\
	"sethi %%hi(2b), %0\n\t"					\
	"jmpl %0 + %%lo(2b), %%g0\n\t"					\
	" mov %3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align 4\n\t"							\
	".word 1b, 3b\n\t"						\
	".previous\n\n\t"						\
	: "=r" (ret) : "r" (x), "r" (__m(addr)),			\
	  "i" (-EFAULT))

#define __put_user_nocheck(data, addr, size) ({				\
	register int __pu_ret;						\
	switch (size) {							\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	__pu_ret;							\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
	"/* Put user asm, inline. */\n"					\
	"1:\t" "st"#size "a %1, [%2] %%asi\n\t"				\
	"clr %0\n"							\
	"2:\n\n\t"							\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align 4\n"							\
	"3:\n\t"							\
	"sethi %%hi(2b), %0\n\t"					\
	"jmpl %0 + %%lo(2b), %%g0\n\t"					\
	" mov %3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align 4\n\t"							\
	".word 1b, 3b\n\t"						\
	".previous\n\n\t"						\
	: "=r" (ret) : "r" (x), "r" (__m(addr)),			\
	  "i" (-EFAULT))

int __put_user_bad(void);

#define __get_kernel_nofault(dst, src, type, label)			\
do {									\
	type *addr = (type __force *)(src);				\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (sizeof(type)) {						\
	case 1: __get_kernel_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_kernel_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_kernel_asm(__gu_val, uw, addr, __gu_ret); break;	\
	case 8: __get_kernel_asm(__gu_val, x, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	if (__gu_ret)							\
		goto label;						\
	*(type *)dst = (__force type) __gu_val;				\
} while (0)
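
/*
 * Sketch of the read side (example_peek is hypothetical; the generic
 * copy_from_kernel_nofault() is the real consumer of this macro):
 *
 *	long example_peek(unsigned long *src, unsigned long *val)
 *	{
 *		__get_kernel_nofault(val, src, unsigned long, fault);
 *		return 0;
 *	fault:
 *		return -EFAULT;
 *	}
 */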

#define __get_kernel_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
	"/* Get kernel asm, inline. */\n"				\
	"1:\t" "ld"#size " [%2], %1\n\t"				\
	"clr %0\n"							\
	"2:\n\n\t"							\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align 4\n"							\
	"3:\n\t"							\
	"sethi %%hi(2b), %0\n\t"					\
	"clr %1\n\t"							\
	"jmpl %0 + %%lo(2b), %%g0\n\t"					\
	" mov %3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align 4\n\t"							\
	".word 1b, 3b\n\n\t"						\
	".previous\n\t"							\
	: "=r" (ret), "=r" (x) : "r" (__m(addr)),			\
	  "i" (-EFAULT))

#define __get_user_nocheck(data, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	data = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
	"/* Get user asm, inline. */\n"					\
	"1:\t" "ld"#size "a [%2] %%asi, %1\n\t"				\
	"clr %0\n"							\
	"2:\n\n\t"							\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align 4\n"							\
	"3:\n\t"							\
	"sethi %%hi(2b), %0\n\t"					\
	"clr %1\n\t"							\
	"jmpl %0 + %%lo(2b), %%g0\n\t"					\
	" mov %3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align 4\n\t"							\
	".word 1b, 3b\n\n\t"						\
	".previous\n\t"							\
	: "=r" (ret), "=r" (x) : "r" (__m(addr)),			\
	  "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
					      const void __user *from,
					      unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
					    const void *from,
					    unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

unsigned long __must_check raw_copy_in_user(void __user *to,
					    const void __user *from,
					    unsigned long size);
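
/*
 * Drivers normally reach these through the generic copy_from_user()/
 * copy_to_user() wrappers; INLINE_COPY_{FROM,TO}_USER asks the generic
 * header to inline those wrappers around the raw routines above.
 * Illustrative sketch (example_fetch is hypothetical):
 *
 *	static int example_fetch(void __user *ubuf, void *kbuf, size_t len)
 *	{
 *		if (copy_from_user(kbuf, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */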

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* _ASM_UACCESS_H */