/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/page.h>
#include <asm/domain.h>
#include <asm/unaligned.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

204baa9922SRussell King /*
213fba7e23SRussell King * These two functions allow hooking accesses to userspace to increase
223fba7e23SRussell King * system integrity by ensuring that the kernel can not inadvertantly
233fba7e23SRussell King * perform such accesses (eg, via list poison values) which could then
243fba7e23SRussell King * be exploited for priviledge escalation.
253fba7e23SRussell King */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}

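/*
 * The usual pattern, as used by the copy helpers later in this file, is
 * to bracket the actual user access with a save/enable and restore pair:
 *
 *	unsigned int __ua_flags = uaccess_save_and_enable();
 *	n = arm_copy_from_user(to, from, n);
 *	uaccess_restore(__ua_flags);
 */
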
/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

#ifdef CONFIG_MMU

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the user address limit (TASK_SIZE).
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

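	/*
	 * Branchless range check: compute TASK_SIZE - 1 - ptr; if that
	 * subtraction borrows (ptr is outside the user range), or if
	 * subtracting size from the remaining room borrows (ptr + size
	 * overruns TASK_SIZE), the conditional movlo below clears
	 * safe_ptr to NULL.
	 */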
	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (TASK_SIZE)
	: "cc");

	csdb();
	return safe_ptr;
}

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: "ip", "lr", "cc")

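/*
 * __get_user_x() is a call into the out-of-line __get_user_<size> helpers
 * (arch/arm/lib/getuser.S): the user pointer is passed in r0 and the size
 * limit in r1; the loaded value comes back in r2 and the error code in r0.
 * Since this is a real function call, ip, lr and the condition flags are
 * clobbered.
 */
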
/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: "ip", "lr", "cc")
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		int __tmp_e;						\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		__tmp_e = __e;						\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__tmp_e;						\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	})

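/*
 * Typical use (illustrative only): get_user() returns 0 on success and
 * -EFAULT on a faulting access, e.g.
 *
 *	int val;
 *
 *	if (get_user(val, &uptr->field))
 *		return -EFAULT;
 */
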
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

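/*
 * As with the get helpers, __put_user_check() calls the out-of-line
 * __put_user_<size> routines (arch/arm/lib/putuser.S): the user pointer
 * is passed in r0, the value in r2 (r2/r3 for the 64-bit case), the size
 * limit in r1, and the error code comes back in r0.
 */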
#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#include <asm-generic/access_ok.h>

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e. they don't return a value as such).
 */
#define __get_user(x, ptr)						\
	({								\
		long __gu_err = 0;					\
		__get_user_err((x), (ptr), __gu_err, TUSER());		\
		__gu_err;						\
	})

#define __get_user_err(x, ptr, err, __t)				\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)
#endif

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

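/*
 * In __get_user_asm(), the "1:" label marks the instruction that may
 * fault.  The fixup at "3:" (placed in .text.fixup) sets the error code
 * to -EFAULT, zeroes the destination so no stale kernel data can leak,
 * and branches back to "2:".  The __ex_table entry pairing 1b with 3b is
 * what lets the fault handler divert execution to that fixup.
 */
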
#define __get_user_asm_byte(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrh" __t)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldr" __t)

#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})

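/*
 * Typical use (illustrative only): put_user() likewise returns 0 on
 * success and -EFAULT on failure, e.g.
 *
 *	if (put_user(val, &uptr->field))
 *		return -EFAULT;
 */
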
#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#endif /* !CONFIG_CPU_SPECTRE */

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strh" __t)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "str" __t)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err, __t)		\
	__asm__ __volatile__(					\
 ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

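/*
 * 64-bit stores are emitted as two 32-bit stores (the __reg_oper macros
 * select which half goes first, depending on endianness).  Either store
 * may fault, so both "1:" and "2:" get their own __ex_table entry
 * pointing at the shared fixup at "4:".
 */
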
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (src);					\
	unsigned long __src = (unsigned long)(__pk_ptr);		\
	type __val;							\
	int __err = 0;							\
	switch (sizeof(type)) {						\
	case 1: __get_user_asm_byte(__val, __src, __err, ""); break;	\
	case 2:	__get_user_asm_half(__val, __src, __err, ""); break;	\
	case 4: __get_user_asm_word(__val, __src, __err, ""); break;	\
	case 8: {							\
		u32 *__v32 = (u32*)&__val;				\
		__get_user_asm_word(__v32[0], __src, __err, "");	\
		if (__err)						\
			break;						\
		__get_user_asm_word(__v32[1], __src+4, __err, "");	\
		break;							\
	}								\
	default: __err = __get_user_bad(); break;			\
	}								\
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))	\
		put_unaligned(__val, (type *)(dst));			\
	else								\
		*(type *)(dst) = __val; /* aligned by caller */		\
	if (__err)							\
		goto err_label;						\
} while (0)

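/*
 * The kernel-space nofault accessors above and below reuse the user
 * accessor asm templates, but pass an empty instruction suffix ("")
 * instead of TUSER(), so ordinary kernel-mode loads/stores are emitted
 * rather than the user-access variants.
 */
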
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (dst);					\
	unsigned long __dst = (unsigned long)__pk_ptr;			\
	int __err = 0;							\
	type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
		     ? get_unaligned((type *)(src))			\
		     : *(type *)(src);	/* aligned by caller */		\
	switch (sizeof(type)) {						\
	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
	case 2:	__put_user_asm_half(__val, __dst, __err, ""); break;	\
	case 4:	__put_user_asm_word(__val, __dst, __err, ""); break;	\
	case 8:	__put_user_asm_dword(__val, __dst, __err, ""); break;	\
	default: __err = __put_user_bad(); break;			\
	}								\
	if (__err)							\
		goto err_label;						\
} while (0)

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
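	/*
	 * When CONFIG_UACCESS_WITH_MEMCPY is enabled, arm_copy_to_user()
	 * is the page-table-walking memcpy implementation, which manages
	 * the user-access state itself, so it is called here without the
	 * save/enable wrapper.
	 */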
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
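/*
 * These tell the generic uaccess layer (include/linux/uaccess.h) to
 * provide inline copy_to_user()/copy_from_user() wrappers built on the
 * raw_copy_*() helpers above, rather than out-of-line versions.
 */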
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */