1 /* 2 * arch/arm/include/asm/uaccess.h 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License version 2 as 6 * published by the Free Software Foundation. 7 */ 8 #ifndef _ASMARM_UACCESS_H 9 #define _ASMARM_UACCESS_H 10 11 /* 12 * User space memory access functions 13 */ 14 #include <linux/string.h> 15 #include <asm/memory.h> 16 #include <asm/domain.h> 17 #include <asm/unified.h> 18 #include <asm/compiler.h> 19 20 #include <asm/extable.h> 21 22 /* 23 * These two functions allow hooking accesses to userspace to increase 24 * system integrity by ensuring that the kernel can not inadvertantly 25 * perform such accesses (eg, via list poison values) which could then 26 * be exploited for priviledge escalation. 27 */ 28 static inline unsigned int uaccess_save_and_enable(void) 29 { 30 #ifdef CONFIG_CPU_SW_DOMAIN_PAN 31 unsigned int old_domain = get_domain(); 32 33 /* Set the current domain access to permit user accesses */ 34 set_domain((old_domain & ~domain_mask(DOMAIN_USER)) | 35 domain_val(DOMAIN_USER, DOMAIN_CLIENT)); 36 37 return old_domain; 38 #else 39 return 0; 40 #endif 41 } 42 43 static inline void uaccess_restore(unsigned int flags) 44 { 45 #ifdef CONFIG_CPU_SW_DOMAIN_PAN 46 /* Restore the user access mask */ 47 set_domain(flags); 48 #endif 49 } 50 51 /* 52 * These two are intentionally not defined anywhere - if the kernel 53 * code generates any references to them, that's a bug. 54 */ 55 extern int __get_user_bad(void); 56 extern int __put_user_bad(void); 57 58 /* 59 * Note that this is actually 0x1,0000,0000 60 */ 61 #define KERNEL_DS 0x00000000 62 #define get_ds() (KERNEL_DS) 63 64 #ifdef CONFIG_MMU 65 66 #define USER_DS TASK_SIZE 67 #define get_fs() (current_thread_info()->addr_limit) 68 69 static inline void set_fs(mm_segment_t fs) 70 { 71 current_thread_info()->addr_limit = fs; 72 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); 73 /* On user-mode return, check fs is correct */ 74 set_thread_flag(TIF_FSCHECK); 75 } 76 77 #define segment_eq(a, b) ((a) == (b)) 78 79 /* We use 33-bit arithmetic here... */ 80 #define __range_ok(addr, size) ({ \ 81 unsigned long flag, roksum; \ 82 __chk_user_ptr(addr); \ 83 __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ 84 : "=&r" (flag), "=&r" (roksum) \ 85 : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \ 86 : "cc"); \ 87 flag; }) 88 89 /* 90 * Single-value transfer routines. They automatically use the right 91 * size if we just have the right pointer type. Note that the functions 92 * which read from user space (*get_*) need to take care not to leak 93 * kernel data even if the calling code is buggy and fails to check 94 * the return value. This means zeroing out the destination variable 95 * or buffer on error. Normally this is done out of line by the 96 * fixup code, but there are a few places where it intrudes on the 97 * main code path. When we only write to user space, there is no 98 * problem. 99 */ 100 extern int __get_user_1(void *); 101 extern int __get_user_2(void *); 102 extern int __get_user_4(void *); 103 extern int __get_user_32t_8(void *); 104 extern int __get_user_8(void *); 105 extern int __get_user_64t_1(void *); 106 extern int __get_user_64t_2(void *); 107 extern int __get_user_64t_4(void *); 108 109 #define __GUP_CLOBBER_1 "lr", "cc" 110 #ifdef CONFIG_CPU_USE_DOMAINS 111 #define __GUP_CLOBBER_2 "ip", "lr", "cc" 112 #else 113 #define __GUP_CLOBBER_2 "lr", "cc" 114 #endif 115 #define __GUP_CLOBBER_4 "lr", "cc" 116 #define __GUP_CLOBBER_32t_8 "lr", "cc" 117 #define __GUP_CLOBBER_8 "lr", "cc" 118 119 #define __get_user_x(__r2, __p, __e, __l, __s) \ 120 __asm__ __volatile__ ( \ 121 __asmeq("%0", "r0") __asmeq("%1", "r2") \ 122 __asmeq("%3", "r1") \ 123 "bl __get_user_" #__s \ 124 : "=&r" (__e), "=r" (__r2) \ 125 : "0" (__p), "r" (__l) \ 126 : __GUP_CLOBBER_##__s) 127 128 /* narrowing a double-word get into a single 32bit word register: */ 129 #ifdef __ARMEB__ 130 #define __get_user_x_32t(__r2, __p, __e, __l, __s) \ 131 __get_user_x(__r2, __p, __e, __l, 32t_8) 132 #else 133 #define __get_user_x_32t __get_user_x 134 #endif 135 136 /* 137 * storing result into proper least significant word of 64bit target var, 138 * different only for big endian case where 64 bit __r2 lsw is r3: 139 */ 140 #ifdef __ARMEB__ 141 #define __get_user_x_64t(__r2, __p, __e, __l, __s) \ 142 __asm__ __volatile__ ( \ 143 __asmeq("%0", "r0") __asmeq("%1", "r2") \ 144 __asmeq("%3", "r1") \ 145 "bl __get_user_64t_" #__s \ 146 : "=&r" (__e), "=r" (__r2) \ 147 : "0" (__p), "r" (__l) \ 148 : __GUP_CLOBBER_##__s) 149 #else 150 #define __get_user_x_64t __get_user_x 151 #endif 152 153 154 #define __get_user_check(x, p) \ 155 ({ \ 156 unsigned long __limit = current_thread_info()->addr_limit - 1; \ 157 register const typeof(*(p)) __user *__p asm("r0") = (p);\ 158 register typeof(x) __r2 asm("r2"); \ 159 register unsigned long __l asm("r1") = __limit; \ 160 register int __e asm("r0"); \ 161 unsigned int __ua_flags = uaccess_save_and_enable(); \ 162 switch (sizeof(*(__p))) { \ 163 case 1: \ 164 if (sizeof((x)) >= 8) \ 165 __get_user_x_64t(__r2, __p, __e, __l, 1); \ 166 else \ 167 __get_user_x(__r2, __p, __e, __l, 1); \ 168 break; \ 169 case 2: \ 170 if (sizeof((x)) >= 8) \ 171 __get_user_x_64t(__r2, __p, __e, __l, 2); \ 172 else \ 173 __get_user_x(__r2, __p, __e, __l, 2); \ 174 break; \ 175 case 4: \ 176 if (sizeof((x)) >= 8) \ 177 __get_user_x_64t(__r2, __p, __e, __l, 4); \ 178 else \ 179 __get_user_x(__r2, __p, __e, __l, 4); \ 180 break; \ 181 case 8: \ 182 if (sizeof((x)) < 8) \ 183 __get_user_x_32t(__r2, __p, __e, __l, 4); \ 184 else \ 185 __get_user_x(__r2, __p, __e, __l, 8); \ 186 break; \ 187 default: __e = __get_user_bad(); break; \ 188 } \ 189 uaccess_restore(__ua_flags); \ 190 x = (typeof(*(p))) __r2; \ 191 __e; \ 192 }) 193 194 #define get_user(x, p) \ 195 ({ \ 196 might_fault(); \ 197 __get_user_check(x, p); \ 198 }) 199 200 extern int __put_user_1(void *, unsigned int); 201 extern int __put_user_2(void *, unsigned int); 202 extern int __put_user_4(void *, unsigned int); 203 extern int __put_user_8(void *, unsigned long long); 204 205 #define __put_user_check(__pu_val, __ptr, __err, __s) \ 206 ({ \ 207 unsigned long __limit = current_thread_info()->addr_limit - 1; \ 208 register typeof(__pu_val) __r2 asm("r2") = __pu_val; \ 209 register const void __user *__p asm("r0") = __ptr; \ 210 register unsigned long __l asm("r1") = __limit; \ 211 register int __e asm("r0"); \ 212 __asm__ __volatile__ ( \ 213 __asmeq("%0", "r0") __asmeq("%2", "r2") \ 214 __asmeq("%3", "r1") \ 215 "bl __put_user_" #__s \ 216 : "=&r" (__e) \ 217 : "0" (__p), "r" (__r2), "r" (__l) \ 218 : "ip", "lr", "cc"); \ 219 __err = __e; \ 220 }) 221 222 #else /* CONFIG_MMU */ 223 224 /* 225 * uClinux has only one addr space, so has simplified address limits. 226 */ 227 #define USER_DS KERNEL_DS 228 229 #define segment_eq(a, b) (1) 230 #define __addr_ok(addr) ((void)(addr), 1) 231 #define __range_ok(addr, size) ((void)(addr), 0) 232 #define get_fs() (KERNEL_DS) 233 234 static inline void set_fs(mm_segment_t fs) 235 { 236 } 237 238 #define get_user(x, p) __get_user(x, p) 239 #define __put_user_check __put_user_nocheck 240 241 #endif /* CONFIG_MMU */ 242 243 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0) 244 245 #define user_addr_max() \ 246 (uaccess_kernel() ? ~0UL : get_fs()) 247 248 /* 249 * The "__xxx" versions of the user access functions do not verify the 250 * address space - it must have been done previously with a separate 251 * "access_ok()" call. 252 * 253 * The "xxx_error" versions set the third argument to EFAULT if an 254 * error occurs, and leave it unchanged on success. Note that these 255 * versions are void (ie, don't return a value as such). 256 */ 257 #define __get_user(x, ptr) \ 258 ({ \ 259 long __gu_err = 0; \ 260 __get_user_err((x), (ptr), __gu_err); \ 261 __gu_err; \ 262 }) 263 264 #define __get_user_error(x, ptr, err) \ 265 ({ \ 266 __get_user_err((x), (ptr), err); \ 267 (void) 0; \ 268 }) 269 270 #define __get_user_err(x, ptr, err) \ 271 do { \ 272 unsigned long __gu_addr = (unsigned long)(ptr); \ 273 unsigned long __gu_val; \ 274 unsigned int __ua_flags; \ 275 __chk_user_ptr(ptr); \ 276 might_fault(); \ 277 __ua_flags = uaccess_save_and_enable(); \ 278 switch (sizeof(*(ptr))) { \ 279 case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ 280 case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ 281 case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ 282 default: (__gu_val) = __get_user_bad(); \ 283 } \ 284 uaccess_restore(__ua_flags); \ 285 (x) = (__typeof__(*(ptr)))__gu_val; \ 286 } while (0) 287 288 #define __get_user_asm(x, addr, err, instr) \ 289 __asm__ __volatile__( \ 290 "1: " TUSER(instr) " %1, [%2], #0\n" \ 291 "2:\n" \ 292 " .pushsection .text.fixup,\"ax\"\n" \ 293 " .align 2\n" \ 294 "3: mov %0, %3\n" \ 295 " mov %1, #0\n" \ 296 " b 2b\n" \ 297 " .popsection\n" \ 298 " .pushsection __ex_table,\"a\"\n" \ 299 " .align 3\n" \ 300 " .long 1b, 3b\n" \ 301 " .popsection" \ 302 : "+r" (err), "=&r" (x) \ 303 : "r" (addr), "i" (-EFAULT) \ 304 : "cc") 305 306 #define __get_user_asm_byte(x, addr, err) \ 307 __get_user_asm(x, addr, err, ldrb) 308 309 #ifndef __ARMEB__ 310 #define __get_user_asm_half(x, __gu_addr, err) \ 311 ({ \ 312 unsigned long __b1, __b2; \ 313 __get_user_asm_byte(__b1, __gu_addr, err); \ 314 __get_user_asm_byte(__b2, __gu_addr + 1, err); \ 315 (x) = __b1 | (__b2 << 8); \ 316 }) 317 #else 318 #define __get_user_asm_half(x, __gu_addr, err) \ 319 ({ \ 320 unsigned long __b1, __b2; \ 321 __get_user_asm_byte(__b1, __gu_addr, err); \ 322 __get_user_asm_byte(__b2, __gu_addr + 1, err); \ 323 (x) = (__b1 << 8) | __b2; \ 324 }) 325 #endif 326 327 #define __get_user_asm_word(x, addr, err) \ 328 __get_user_asm(x, addr, err, ldr) 329 330 331 #define __put_user_switch(x, ptr, __err, __fn) \ 332 do { \ 333 const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \ 334 __typeof__(*(ptr)) __pu_val = (x); \ 335 unsigned int __ua_flags; \ 336 might_fault(); \ 337 __ua_flags = uaccess_save_and_enable(); \ 338 switch (sizeof(*(ptr))) { \ 339 case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \ 340 case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \ 341 case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \ 342 case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \ 343 default: __err = __put_user_bad(); break; \ 344 } \ 345 uaccess_restore(__ua_flags); \ 346 } while (0) 347 348 #define put_user(x, ptr) \ 349 ({ \ 350 int __pu_err = 0; \ 351 __put_user_switch((x), (ptr), __pu_err, __put_user_check); \ 352 __pu_err; \ 353 }) 354 355 #define __put_user(x, ptr) \ 356 ({ \ 357 long __pu_err = 0; \ 358 __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \ 359 __pu_err; \ 360 }) 361 362 #define __put_user_error(x, ptr, err) \ 363 ({ \ 364 __put_user_switch((x), (ptr), (err), __put_user_nocheck); \ 365 (void) 0; \ 366 }) 367 368 #define __put_user_nocheck(x, __pu_ptr, __err, __size) \ 369 do { \ 370 unsigned long __pu_addr = (unsigned long)__pu_ptr; \ 371 __put_user_nocheck_##__size(x, __pu_addr, __err); \ 372 } while (0) 373 374 #define __put_user_nocheck_1 __put_user_asm_byte 375 #define __put_user_nocheck_2 __put_user_asm_half 376 #define __put_user_nocheck_4 __put_user_asm_word 377 #define __put_user_nocheck_8 __put_user_asm_dword 378 379 #define __put_user_asm(x, __pu_addr, err, instr) \ 380 __asm__ __volatile__( \ 381 "1: " TUSER(instr) " %1, [%2], #0\n" \ 382 "2:\n" \ 383 " .pushsection .text.fixup,\"ax\"\n" \ 384 " .align 2\n" \ 385 "3: mov %0, %3\n" \ 386 " b 2b\n" \ 387 " .popsection\n" \ 388 " .pushsection __ex_table,\"a\"\n" \ 389 " .align 3\n" \ 390 " .long 1b, 3b\n" \ 391 " .popsection" \ 392 : "+r" (err) \ 393 : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ 394 : "cc") 395 396 #define __put_user_asm_byte(x, __pu_addr, err) \ 397 __put_user_asm(x, __pu_addr, err, strb) 398 399 #ifndef __ARMEB__ 400 #define __put_user_asm_half(x, __pu_addr, err) \ 401 ({ \ 402 unsigned long __temp = (__force unsigned long)(x); \ 403 __put_user_asm_byte(__temp, __pu_addr, err); \ 404 __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ 405 }) 406 #else 407 #define __put_user_asm_half(x, __pu_addr, err) \ 408 ({ \ 409 unsigned long __temp = (__force unsigned long)(x); \ 410 __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ 411 __put_user_asm_byte(__temp, __pu_addr + 1, err); \ 412 }) 413 #endif 414 415 #define __put_user_asm_word(x, __pu_addr, err) \ 416 __put_user_asm(x, __pu_addr, err, str) 417 418 #ifndef __ARMEB__ 419 #define __reg_oper0 "%R2" 420 #define __reg_oper1 "%Q2" 421 #else 422 #define __reg_oper0 "%Q2" 423 #define __reg_oper1 "%R2" 424 #endif 425 426 #define __put_user_asm_dword(x, __pu_addr, err) \ 427 __asm__ __volatile__( \ 428 ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ 429 ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ 430 THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \ 431 THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \ 432 "3:\n" \ 433 " .pushsection .text.fixup,\"ax\"\n" \ 434 " .align 2\n" \ 435 "4: mov %0, %3\n" \ 436 " b 3b\n" \ 437 " .popsection\n" \ 438 " .pushsection __ex_table,\"a\"\n" \ 439 " .align 3\n" \ 440 " .long 1b, 4b\n" \ 441 " .long 2b, 4b\n" \ 442 " .popsection" \ 443 : "+r" (err), "+r" (__pu_addr) \ 444 : "r" (x), "i" (-EFAULT) \ 445 : "cc") 446 447 448 #ifdef CONFIG_MMU 449 extern unsigned long __must_check 450 arm_copy_from_user(void *to, const void __user *from, unsigned long n); 451 452 static inline unsigned long __must_check 453 raw_copy_from_user(void *to, const void __user *from, unsigned long n) 454 { 455 unsigned int __ua_flags; 456 457 __ua_flags = uaccess_save_and_enable(); 458 n = arm_copy_from_user(to, from, n); 459 uaccess_restore(__ua_flags); 460 return n; 461 } 462 463 extern unsigned long __must_check 464 arm_copy_to_user(void __user *to, const void *from, unsigned long n); 465 extern unsigned long __must_check 466 __copy_to_user_std(void __user *to, const void *from, unsigned long n); 467 468 static inline unsigned long __must_check 469 raw_copy_to_user(void __user *to, const void *from, unsigned long n) 470 { 471 #ifndef CONFIG_UACCESS_WITH_MEMCPY 472 unsigned int __ua_flags; 473 __ua_flags = uaccess_save_and_enable(); 474 n = arm_copy_to_user(to, from, n); 475 uaccess_restore(__ua_flags); 476 return n; 477 #else 478 return arm_copy_to_user(to, from, n); 479 #endif 480 } 481 482 extern unsigned long __must_check 483 arm_clear_user(void __user *addr, unsigned long n); 484 extern unsigned long __must_check 485 __clear_user_std(void __user *addr, unsigned long n); 486 487 static inline unsigned long __must_check 488 __clear_user(void __user *addr, unsigned long n) 489 { 490 unsigned int __ua_flags = uaccess_save_and_enable(); 491 n = arm_clear_user(addr, n); 492 uaccess_restore(__ua_flags); 493 return n; 494 } 495 496 #else 497 static inline unsigned long 498 raw_copy_from_user(void *to, const void __user *from, unsigned long n) 499 { 500 memcpy(to, (const void __force *)from, n); 501 return 0; 502 } 503 static inline unsigned long 504 raw_copy_to_user(void __user *to, const void *from, unsigned long n) 505 { 506 memcpy((void __force *)to, from, n); 507 return 0; 508 } 509 #define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) 510 #endif 511 #define INLINE_COPY_TO_USER 512 #define INLINE_COPY_FROM_USER 513 514 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) 515 { 516 if (access_ok(VERIFY_WRITE, to, n)) 517 n = __clear_user(to, n); 518 return n; 519 } 520 521 /* These are from lib/ code, and use __get_user() and friends */ 522 extern long strncpy_from_user(char *dest, const char __user *src, long count); 523 524 extern __must_check long strnlen_user(const char __user *str, long n); 525 526 #endif /* _ASMARM_UACCESS_H */ 527