/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel cannot inadvertently
 * perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that an addr_limit of 0 behaves as 0x1,0000,0000 (ie, the whole
 * 4GiB address space) because of the 33-bit check in __range_ok().
 */
#define KERNEL_DS	0x00000000

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b)	((a) == (b))

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__(".syntax unified\n" \
		"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)	\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
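
/*
 * Illustrative sketch (the function name is made up, not kernel API):
 * any open-coded user access is expected to bracket the access with
 * uaccess_save_and_enable()/uaccess_restore(), exactly as the
 * raw_copy_*_user() and __clear_user() helpers later in this file do:
 *
 *	static inline unsigned long example_copy_in(void *to,
 *						    const void __user *from,
 *						    unsigned long n)
 *	{
 *		unsigned int ua_flags = uaccess_save_and_enable();
 *
 *		n = arm_copy_from_user(to, from, n);
 *		uaccess_restore(ua_flags);
 *		return n;
 *	}
 */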

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1		"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2		"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2		"lr", "cc"
#endif
#define __GUP_CLOBBER_4		"lr", "cc"
#define __GUP_CLOBBER_32t_8	"lr", "cc"
#define __GUP_CLOBBER_8		"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)			\
	__asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")		\
		__asmeq("%3", "r1")				\
		"bl	__get_user_" #__s			\
		: "=&r" (__e), "=r" (__r2)			\
		: "0" (__p), "r" (__l)				\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)		\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)		\
	__asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")		\
		__asmeq("%3", "r1")				\
		"bl	__get_user_64t_" #__s			\
		: "=&r" (__e), "=r" (__r2)			\
		: "0" (__p), "r" (__l)				\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x, p)						\
	({							\
		might_fault();					\
		__get_user_check(x, p);				\
	})
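
/*
 * Usage sketch (illustrative only, the function name is made up):
 * get_user() performs the address-limit check itself and returns
 * -EFAULT on failure, zeroing the destination, so callers only need
 * to test the return value:
 *
 *	static int example_read_flags(const unsigned int __user *uptr,
 *				      unsigned int *out)
 *	{
 *		unsigned int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		*out = val;
 *		return 0;
 *	}
 */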

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)	(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())
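
/*
 * Usage sketch (illustrative only, the function name is made up):
 * access_ok() only validates the range against the current address
 * limit; it does not fault anything in.  A typical pattern checks the
 * whole buffer once and then uses the unchecked accessors (which,
 * under CONFIG_CPU_SPECTRE, simply fall back to the checking ones):
 *
 *	static int example_read_pair(const unsigned int __user *uptr,
 *				     unsigned int *a, unsigned int *b)
 *	{
 *		if (!access_ok(uptr, 2 * sizeof(unsigned int)))
 *			return -EFAULT;
 *		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */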

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x, ptr)					\
({								\
	long __gu_err = 0;					\
	__get_user_err((x), (ptr), __gu_err);			\
	__gu_err;						\
})

#define __get_user_err(x, ptr, err)				\
do {								\
	unsigned long __gu_addr = (unsigned long)(ptr);		\
	unsigned long __gu_val;					\
	unsigned int __ua_flags;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__ua_flags = uaccess_save_and_enable();			\
	switch (sizeof(*(ptr))) {				\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
	default: (__gu_val) = __get_user_bad();			\
	}							\
	uaccess_restore(__ua_flags);				\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrh)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)
#endif


#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
	({								\
		int __pu_err = 0;					\
		__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
		__pu_err;						\
	})
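
/*
 * Usage sketch (illustrative only, the function name is made up):
 * put_user() verifies the destination address itself and returns
 * -EFAULT on failure, so its return value can be passed straight back:
 *
 *	static int example_store_result(unsigned int __user *uptr,
 *					unsigned int result)
 *	{
 *		return put_user(result, uptr);
 *	}
 */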

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strh)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) " " __reg_oper0 ", [%1]\n"		) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) " " __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#endif /* !CONFIG_CPU_SPECTRE */

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)	(memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}
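
/*
 * Usage sketch (illustrative only, the struct and function names are
 * made up): callers normally use the generic copy_{to,from}_user() and
 * clear_user() wrappers, which return the number of bytes that could
 * not be transferred, so zero means success:
 *
 *	static int example_roundtrip(struct example_args __user *uarg)
 *	{
 *		struct example_args karg;
 *
 *		if (copy_from_user(&karg, uarg, sizeof(karg)))
 *			return -EFAULT;
 *		karg.status = 0;
 *		if (copy_to_user(uarg, &karg, sizeof(karg)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */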

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */