/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/page.h>
#include <asm/domain.h>
#include <asm/unaligned.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
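/*
 * Illustrative only: the expected pattern is to bracket each individual
 * user access with save/enable and restore, as the accessors below do.
 * A minimal sketch (hypothetical caller, not part of this header):
 *
 *	unsigned int ua_flags = uaccess_save_and_enable();
 *	...perform the user-space load or store...
 *	uaccess_restore(ua_flags);
 */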

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

#ifdef CONFIG_MMU

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
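/*
 * Informally, per the definition above: on 32-bit ARM __inttype(u8) and
 * __inttype(u32) resolve to unsigned long, while __inttype(u64)
 * resolves to unsigned long long.
 */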

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size) \
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (TASK_SIZE)
	: "cc");

	csdb();
	return safe_ptr;
}
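/*
 * A minimal usage sketch (hypothetical caller):
 *
 *	void __user *masked = uaccess_mask_range_ptr(uptr, len);
 *
 * 'masked' becomes NULL when uptr + len reaches beyond TASK_SIZE, so a
 * subsequent (possibly speculative) access cannot be steered at kernel
 * addresses.
 */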

/*
 * Single-value transfer routines. They automatically use the right
 * size if we just have the right pointer type. Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value. This means zeroing out the destination variable
 * or buffer on error. Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path. When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __get_user_x(__r2, __p, __e, __l, __s) \
	__asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%1", "r2") \
		__asmeq("%3", "r1") \
		"bl	__get_user_" #__s \
		: "=&r" (__e), "=r" (__r2) \
		: "0" (__p), "r" (__l) \
		: "ip", "lr", "cc")

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s) \
	__asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%1", "r2") \
		__asmeq("%3", "r1") \
		"bl	__get_user_64t_" #__s \
		: "=&r" (__e), "=r" (__r2) \
		: "0" (__p), "r" (__l) \
		: "ip", "lr", "cc")
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p) \
	({ \
		unsigned long __limit = TASK_SIZE - 1; \
		register typeof(*(p)) __user *__p asm("r0") = (p); \
		register __inttype(x) __r2 asm("r2"); \
		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		unsigned int __ua_flags = uaccess_save_and_enable(); \
		int __tmp_e; \
		switch (sizeof(*(__p))) { \
		case 1: \
			if (sizeof((x)) >= 8) \
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else \
				__get_user_x(__r2, __p, __e, __l, 1); \
			break; \
		case 2: \
			if (sizeof((x)) >= 8) \
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else \
				__get_user_x(__r2, __p, __e, __l, 2); \
			break; \
		case 4: \
			if (sizeof((x)) >= 8) \
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else \
				__get_user_x(__r2, __p, __e, __l, 4); \
			break; \
		case 8: \
			if (sizeof((x)) < 8) \
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else \
				__get_user_x(__r2, __p, __e, __l, 8); \
			break; \
		default: __e = __get_user_bad(); break; \
		} \
		__tmp_e = __e; \
		uaccess_restore(__ua_flags); \
		x = (typeof(*(p))) __r2; \
		__tmp_e; \
	})

#define get_user(x, p) \
	({ \
		might_fault(); \
		__get_user_check(x, p); \
	 })
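/*
 * A minimal usage sketch (hypothetical caller): get_user() returns 0 on
 * success and -EFAULT on fault, and per the comment above the
 * destination is zeroed on error rather than left holding stale data:
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 */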

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s) \
	({ \
		unsigned long __limit = TASK_SIZE - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
		register const void __user *__p asm("r0") = __ptr; \
		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		__asm__ __volatile__ ( \
			__asmeq("%0", "r0") __asmeq("%2", "r2") \
			__asmeq("%3", "r1") \
			"bl	__put_user_" #__s \
			: "=&r" (__e) \
			: "0" (__p), "r" (__r2), "r" (__l) \
			: "ip", "lr", "cc"); \
		__err = __e; \
	})

#else /* CONFIG_MMU */

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#include <asm-generic/access_ok.h>

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x, ptr) \
	({ \
		long __gu_err = 0; \
		__get_user_err((x), (ptr), __gu_err, TUSER()); \
		__gu_err; \
	})
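/*
 * Per the comment above, __get_user() skips the address-space check, so
 * the caller must have validated the range first. A minimal sketch
 * (hypothetical caller):
 *
 *	u32 val;
 *
 *	if (!access_ok(uptr, sizeof(val)))
 *		return -EFAULT;
 *	if (__get_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 */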

#define __get_user_err(x, ptr, err, __t) \
	do { \
		unsigned long __gu_addr = (unsigned long)(ptr); \
		unsigned long __gu_val; \
		unsigned int __ua_flags; \
		__chk_user_ptr(ptr); \
		might_fault(); \
		__ua_flags = uaccess_save_and_enable(); \
		switch (sizeof(*(ptr))) { \
		case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
		case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
		case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
		default: (__gu_val) = __get_user_bad(); \
		} \
		uaccess_restore(__ua_flags); \
		(x) = (__typeof__(*(ptr)))__gu_val; \
	} while (0)
#endif

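/*
 * The accessor below relies on the exception table mechanism: label 1
 * is the access that may fault, label 3 (placed in .text.fixup) sets
 * the error code to -EFAULT and zeroes the destination, and the
 * __ex_table entry maps a fault at 1b to the fixup at 3b.
 */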
#define __get_user_asm(x, addr, err, instr) \
	__asm__ __volatile__( \
	"1:	" instr " %1, [%2], #0\n" \
	"2:\n" \
	"	.pushsection .text.fixup,\"ax\"\n" \
	"	.align	2\n" \
	"3:	mov	%0, %3\n" \
	"	mov	%1, #0\n" \
	"	b	2b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 3b\n" \
	"	.popsection" \
	: "+r" (err), "=&r" (x) \
	: "r" (addr), "i" (-EFAULT) \
	: "cc")

#define __get_user_asm_byte(x, addr, err, __t) \
	__get_user_asm(x, addr, err, "ldrb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err, __t) \
	__get_user_asm(x, addr, err, "ldrh" __t)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
	unsigned long __b1, __b2; \
	__get_user_asm_byte(__b1, __gu_addr, err, __t); \
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
	(x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
	unsigned long __b1, __b2; \
	__get_user_asm_byte(__b1, __gu_addr, err, __t); \
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
	(x) = (__b1 << 8) | __b2; \
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err, __t) \
	__get_user_asm(x, addr, err, "ldr" __t)

#define __put_user_switch(x, ptr, __err, __fn) \
	do { \
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
		__typeof__(*(ptr)) __pu_val = (x); \
		unsigned int __ua_flags; \
		might_fault(); \
		__ua_flags = uaccess_save_and_enable(); \
		switch (sizeof(*(ptr))) { \
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break; \
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break; \
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break; \
		default: __err = __put_user_bad(); break; \
		} \
		uaccess_restore(__ua_flags); \
	} while (0)

#define put_user(x, ptr) \
({ \
	int __pu_err = 0; \
	__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
	__pu_err; \
})
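/*
 * A minimal usage sketch (hypothetical caller): put_user() returns 0 on
 * success, or -EFAULT if the store faults or the pointer lies outside
 * the user address range:
 *
 *	if (put_user(status, (u32 __user *)uptr))
 *		return -EFAULT;
 */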

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr) \
({ \
	long __pu_err = 0; \
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
	__pu_err; \
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
	do { \
		unsigned long __pu_addr = (unsigned long)__pu_ptr; \
		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#endif /* !CONFIG_CPU_SPECTRE */

#define __put_user_asm(x, __pu_addr, err, instr) \
	__asm__ __volatile__( \
	"1:	" instr " %1, [%2], #0\n" \
	"2:\n" \
	"	.pushsection .text.fixup,\"ax\"\n" \
	"	.align	2\n" \
	"3:	mov	%0, %3\n" \
	"	b	2b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 3b\n" \
	"	.popsection" \
	: "+r" (err) \
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err, __t) \
	__put_user_asm(x, __pu_addr, err, "strb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err, __t) \
	__put_user_asm(x, __pu_addr, err, "strh" __t)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
	unsigned long __temp = (__force unsigned long)(x); \
	__put_user_asm_byte(__temp, __pu_addr, err, __t); \
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
	unsigned long __temp = (__force unsigned long)(x); \
	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \
	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err, __t) \
	__put_user_asm(x, __pu_addr, err, "str" __t)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err, __t) \
	__asm__ __volatile__( \
 ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n"  ) \
 ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"      ) \
 THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"      ) \
 THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n"  ) \
	"3:\n" \
	"	.pushsection .text.fixup,\"ax\"\n" \
	"	.align	2\n" \
	"4:	mov	%0, %3\n" \
	"	b	3b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 4b\n" \
	"	.long	2b, 4b\n" \
	"	.popsection" \
	: "+r" (err), "+r" (__pu_addr) \
	: "r" (x), "i" (-EFAULT) \
	: "cc")

#define __get_kernel_nofault(dst, src, type, err_label)		\
do {									\
	const type *__pk_ptr = (src);					\
	unsigned long __src = (unsigned long)(__pk_ptr);		\
	type __val;							\
	int __err = 0;							\
	switch (sizeof(type)) {						\
	case 1:	__get_user_asm_byte(__val, __src, __err, ""); break;	\
	case 2: __get_user_asm_half(__val, __src, __err, ""); break;	\
	case 4: __get_user_asm_word(__val, __src, __err, ""); break;	\
	case 8: {							\
		u32 *__v32 = (u32*)&__val;				\
		__get_user_asm_word(__v32[0], __src, __err, "");	\
		if (__err)						\
			break;						\
		__get_user_asm_word(__v32[1], __src+4, __err, "");	\
		break;							\
	}								\
	default: __err = __get_user_bad(); break;			\
	}								\
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))	\
		put_unaligned(__val, (type *)(dst));			\
	else								\
		*(type *)(dst) = __val; /* aligned by caller */		\
	if (__err)							\
		goto err_label;						\
} while (0)
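/*
 * A minimal usage sketch (hypothetical caller; the usual entry point is
 * copy_from_kernel_nofault(), which is built on this macro). The last
 * argument is a goto label taken if the access faults:
 *
 *	u32 val;
 *
 *	__get_kernel_nofault(&val, kaddr, u32, fault);
 *	return val;
 * fault:
 *	return 0;
 */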

#define __put_kernel_nofault(dst, src, type, err_label)		\
do {									\
	const type *__pk_ptr = (dst);					\
	unsigned long __dst = (unsigned long)__pk_ptr;			\
	int __err = 0;							\
	type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)	\
		     ? get_unaligned((type *)(src))			\
		     : *(type *)(src);	/* aligned by caller */		\
	switch (sizeof(type)) {						\
	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
	case 2:	__put_user_asm_half(__val, __dst, __err, ""); break;	\
	case 4:	__put_user_asm_word(__val, __dst, __err, ""); break;	\
	case 8:	__put_user_asm_dword(__val, __dst, __err, ""); break;	\
	default: __err = __put_user_bad(); break;			\
	}								\
	if (__err)							\
		goto err_label;						\
} while (0)

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}
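/*
 * The generic uaccess layer builds copy_{to,from}_user() on top of the
 * raw_copy_*() helpers above; like clear_user(), they return the number
 * of bytes that could not be transferred (0 on success). A minimal
 * usage sketch (hypothetical caller):
 *
 *	if (clear_user(uptr, len))
 *		return -EFAULT;
 */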

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */