/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/system.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a,b)	((a) == (b))

#define __addr_ok(addr) ({					\
	unsigned long flag;					\
	__asm__("cmp %2, %0; movlo %0, #0"			\
		: "=&r" (flag)					\
		: "0" (current_thread_info()->addr_limit), "r" (addr) \
		: "cc");					\
	(flag == 0); })

/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({				\
	unsigned long flag, roksum;				\
	__chk_user_ptr(addr);					\
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum)			\
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc");					\
	flag; })
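/*
 * Illustrative sketch, not part of this header: for the common USER_DS
 * case (a non-zero addr_limit), the 33-bit assembly check above is
 * roughly equivalent to the plain C below.  Widening to 64 bits stands
 * in for the carry bit that the adds/sbcccs sequence threads through,
 * so "addr + size wrapped" and "addr + size exceeds the limit" collapse
 * into a single comparison.
 */
#if 0
static inline unsigned long __range_ok_sketch(unsigned long addr,
					      unsigned long size)
{
	unsigned long long end = (unsigned long long)addr + size;

	/* Zero means the whole [addr, addr + size) range is accessible. */
	return end <= current_thread_info()->addr_limit ? 0 : 1;
}
#endif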
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);

#define __get_user_x(__r2,__p,__e,__s,__i...)			\
	__asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")		\
		"bl	__get_user_" #__s			\
		: "=&r" (__e), "=r" (__r2)			\
		: "0" (__p)					\
		: __i, "cc")

#define get_user(x,p)							\
	({								\
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register unsigned long __r2 asm("r2");			\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			__get_user_x(__r2, __p, __e, 1, "lr");		\
			break;						\
		case 2:							\
			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\
			break;						\
		case 4:							\
			__get_user_x(__r2, __p, __e, 4, "lr");		\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_x(__r2,__p,__e,__s)				\
	__asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%2", "r2")		\
		"bl	__put_user_" #__s			\
		: "=&r" (__e)					\
		: "0" (__p), "r" (__r2)				\
		: "ip", "lr", "cc")

#define put_user(x,p)							\
	({								\
		register const typeof(*(p)) __r2 asm("r2") = (x);	\
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			__put_user_x(__r2, __p, __e, 1);		\
			break;						\
		case 2:							\
			__put_user_x(__r2, __p, __e, 2);		\
			break;						\
		case 4:							\
			__put_user_x(__r2, __p, __e, 4);		\
			break;						\
		case 8:							\
			__put_user_x(__r2, __p, __e, 8);		\
			break;						\
		default: __e = __put_user_bad(); break;			\
		}							\
		__e;							\
	})
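/*
 * Usage sketch (hypothetical, for illustration only): get_user() and
 * put_user() select the 1/2/4(/8)-byte helper from the pointer's type
 * and return 0 on success or -EFAULT on a bad address.  A driver might
 * bump a user-supplied counter like this; the function and parameter
 * names are invented for the example.
 */
#if 0
static int bump_user_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* nonzero means the read faulted */
		return -EFAULT;
	return put_user(val + 1, uptr);
}
#endif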
#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a,b)		(1)
#define __addr_ok(addr)		(1)
#define __range_ok(addr,size)	(0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x,p)	__get_user(x,p)
#define put_user(x,p)	__put_user(x,p)

#endif /* CONFIG_MMU */

#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x,ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x),(ptr),__gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x,ptr,err)					\
({									\
	__get_user_err((x),(ptr),err);					\
	(void) 0;							\
})

#define __get_user_err(x,ptr,err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
	case 4:	__get_user_asm_word(__gu_val,__gu_addr,err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm_byte(x,addr,err)			\
	__asm__ __volatile__(				\
	"1:	ldrbt	%1,[%2]\n"			\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.align	2\n"				\
	"3:	mov	%0, %3\n"			\
	"	mov	%1, #0\n"			\
	"	b	2b\n"				\
	"	.previous\n"				\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	3\n"				\
	"	.long	1b, 3b\n"			\
	"	.previous"				\
	: "+r" (err), "=&r" (x)				\
	: "r" (addr), "i" (-EFAULT)			\
	: "cc")

#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err)		\
({							\
	unsigned long __b1, __b2;			\
	__get_user_asm_byte(__b1, __gu_addr, err);	\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);	\
	(x) = __b1 | (__b2 << 8);			\
})
#else
#define __get_user_asm_half(x,__gu_addr,err)		\
({							\
	unsigned long __b1, __b2;			\
	__get_user_asm_byte(__b1, __gu_addr, err);	\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);	\
	(x) = (__b1 << 8) | __b2;			\
})
#endif

#define __get_user_asm_word(x,addr,err)			\
	__asm__ __volatile__(				\
	"1:	ldrt	%1,[%2]\n"			\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.align	2\n"				\
	"3:	mov	%0, %3\n"			\
	"	mov	%1, #0\n"			\
	"	b	2b\n"				\
	"	.previous\n"				\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	3\n"				\
	"	.long	1b, 3b\n"			\
	"	.previous"				\
	: "+r" (err), "=&r" (x)				\
	: "r" (addr), "i" (-EFAULT)			\
	: "cc")

#define __put_user(x,ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_err((x),(ptr),__pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x,ptr,err)					\
({									\
	__put_user_err((x),(ptr),err);					\
	(void) 0;							\
})

#define __put_user_err(x,ptr,err)					\
do {									\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
	case 4: __put_user_asm_word(__pu_val,__pu_addr,err);	break;	\
	case 8:	__put_user_asm_dword(__pu_val,__pu_addr,err);	break;	\
	default: __put_user_bad();					\
	}								\
} while (0)

#define __put_user_asm_byte(x,__pu_addr,err)		\
	__asm__ __volatile__(				\
	"1:	strbt	%1,[%2]\n"			\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.align	2\n"				\
	"3:	mov	%0, %3\n"			\
	"	b	2b\n"				\
	"	.previous\n"				\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	3\n"				\
	"	.long	1b, 3b\n"			\
	"	.previous"				\
	: "+r" (err)					\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)	\
	: "cc")

#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err)			\
({								\
	unsigned long __temp = (unsigned long)(x);		\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x,__pu_addr,err)			\
({								\
	unsigned long __temp = (unsigned long)(x);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#define __put_user_asm_word(x,__pu_addr,err)		\
	__asm__ __volatile__(				\
	"1:	strt	%1,[%2]\n"			\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.align	2\n"				\
	"3:	mov	%0, %3\n"			\
	"	b	2b\n"				\
	"	.previous\n"				\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	3\n"				\
	"	.long	1b, 3b\n"			\
	"	.previous"				\
	: "+r" (err)					\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)	\
	: "cc")

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x,__pu_addr,err)		\
	__asm__ __volatile__(				\
	"1:	strt	" __reg_oper1 ", [%1], #4\n"	\
	"2:	strt	" __reg_oper0 ", [%1]\n"	\
	"3:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.align	2\n"				\
	"4:	mov	%0, %3\n"			\
	"	b	3b\n"				\
	"	.previous\n"				\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	3\n"				\
	"	.long	1b, 4b\n"			\
	"	.long	2b, 4b\n"			\
	"	.previous"				\
	: "+r" (err), "+r" (__pu_addr)			\
	: "r" (x), "i" (-EFAULT)			\
	: "cc")
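/*
 * Usage sketch (hypothetical): the "__xxx" variants above skip the
 * address-space check, so a single access_ok() must cover every access
 * that follows, as the comment above requires.  __get_user_error() sets
 * its third argument to -EFAULT on a fault and leaves it alone on
 * success, which lets several reads share one error variable.  The
 * struct and function names here are invented for the example.
 */
#if 0
struct pair { int a, b; };

static int read_pair(struct pair *dst, const struct pair __user *src)
{
	int err = 0;

	if (!access_ok(VERIFY_READ, src, sizeof(*src)))
		return -EFAULT;
	__get_user_error(dst->a, &src->a, err);
	__get_user_error(dst->b, &src->b, err);
	return err;
}
#endif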
#ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
#else
#define __copy_from_user(to,from,n)	(memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to,from,n)	(memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
#endif

extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long __must_check __strnlen_user(const char __user *s, long n);

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else /* security hole - plug it */
		memset(to, 0, n);
	return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;
	if (access_ok(VERIFY_READ, src, 1))
		res = __strncpy_from_user(dst, src, count);
	return res;
}

#define strlen_user(s)	strnlen_user(s, ~0UL >> 1)

static inline long __must_check strnlen_user(const char __user *s, long n)
{
	unsigned long res = 0;

	if (__addr_ok(s))
		res = __strnlen_user(s, n);

	return res;
}

#endif /* _ASMARM_UACCESS_H */
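/*
 * Trailing usage sketch (illustrative only, deliberately left outside
 * the include guard and compiled out): copy_{from,to}_user() return the
 * number of bytes that could NOT be copied, so any nonzero result is a
 * fault, and copy_from_user() zero-fills the destination when the
 * access_ok() check fails so a buggy caller cannot leak kernel memory.
 * The struct and function names below are invented for the example.
 */
#if 0
struct my_config { int flags; int timeout; };

static long fetch_config(struct my_config *cfg, const void __user *ubuf)
{
	if (copy_from_user(cfg, ubuf, sizeof(*cfg)))
		return -EFAULT;	/* some bytes were left uncopied */
	return 0;
}
#endif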