/*
 *  arch/arm/include/asm/uaccess.h
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a,b)	((a) == (b))

#define __addr_ok(addr) ({ \
	unsigned long flag; \
	__asm__("cmp %2, %0; movlo %0, #0" \
		: "=&r" (flag) \
		: "0" (current_thread_info()->addr_limit), "r" (addr) \
		: "cc"); \
	(flag == 0); })

/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
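/*
 * The three helpers above are implemented out of line in assembler
 * (arch/arm/lib/getuser.S).  As a sketch of the convention that
 * __get_user_x() below relies on: the user pointer is passed in r0,
 * the zero-extended value comes back in r2, and the error code (0 or
 * -EFAULT) comes back in r0; the extra arguments to __get_user_x()
 * name the scratch registers each size variant may clobber.
 */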
#define __get_user_x(__r2,__p,__e,__s,__i...)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p)						\
		: __i, "cc")

#define get_user(x,p)							\
	({								\
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register unsigned long __r2 asm("r2");			\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			__get_user_x(__r2, __p, __e, 1, "lr");		\
			break;						\
		case 2:							\
			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\
			break;						\
		case 4:							\
			__get_user_x(__r2, __p, __e, 4, "lr");		\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_x(__r2,__p,__e,__s)					\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
		"bl	__put_user_" #__s				\
		: "=&r" (__e)						\
		: "0" (__p), "r" (__r2)					\
		: "ip", "lr", "cc")

#define put_user(x,p)							\
	({								\
		register const typeof(*(p)) __r2 asm("r2") = (x);	\
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			__put_user_x(__r2, __p, __e, 1);		\
			break;						\
		case 2:							\
			__put_user_x(__r2, __p, __e, 2);		\
			break;						\
		case 4:							\
			__put_user_x(__r2, __p, __e, 4);		\
			break;						\
		case 8:							\
			__put_user_x(__r2, __p, __e, 8);		\
			break;						\
		default: __e = __put_user_bad(); break;			\
		}							\
		__e;							\
	})

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a,b)		(1)
#define __addr_ok(addr)		(1)
#define __range_ok(addr,size)	(0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x,p)	__get_user(x,p)
#define put_user(x,p)	__put_user(x,p)

#endif /* CONFIG_MMU */

#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)

#define user_addr_max() \
	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
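/*
 * A minimal usage sketch of the pattern these macros are designed for
 * (read_pair() is a hypothetical example, not a kernel function):
 * validate the whole range once with access_ok(), then perform the
 * individual accesses with the unchecked __get_user():
 *
 *	static int read_pair(const int __user *p, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, p, 2 * sizeof(int)))
 *			return -EFAULT;
 *		if (__get_user(*a, p) || __get_user(*b, p + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */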
#define __get_user(x,ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x),(ptr),__gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x,ptr,err)					\
({									\
	__get_user_err((x),(ptr),err);					\
	(void) 0;							\
})

#define __get_user_err(x,ptr,err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
	case 4:	__get_user_asm_word(__gu_val,__gu_addr,err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm_byte(x,addr,err)				\
	__asm__ __volatile__(					\
	"1:	" TUSER(ldrb) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x,__gu_addr,err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#define __get_user_asm_word(x,addr,err)				\
	__asm__ __volatile__(					\
	"1:	" TUSER(ldr) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __put_user(x,ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_err((x),(ptr),__pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x,ptr,err)					\
({									\
	__put_user_err((x),(ptr),err);					\
	(void) 0;							\
})

#define __put_user_err(x,ptr,err)					\
do {									\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
	case 4: __put_user_asm_word(__pu_val,__pu_addr,err);	break;	\
	case 8:	__put_user_asm_dword(__pu_val,__pu_addr,err);	break;	\
	default: __put_user_bad();					\
	}								\
} while (0)

#define __put_user_asm_byte(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(strb) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err)			\
({								\
	unsigned long __temp = (unsigned long)(x);		\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x,__pu_addr,err)			\
({								\
	unsigned long __temp = (unsigned long)(x);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#define __put_user_asm_word(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(str) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")


#ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
#else
#define __copy_from_user(to,from,n)	(memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to,from,n)	(memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
#endif
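/*
 * A minimal usage sketch for the checked copy routines below
 * (fetch_args() is a hypothetical example): the return value is the
 * number of bytes that could not be copied, so zero means success.
 * When the read faults or access_ok() fails, the uncopied part of the
 * destination buffer is zero-filled (out of line by the fixup code,
 * or by the memset() in copy_from_user()), so stale kernel data is
 * not leaked:
 *
 *	struct args { unsigned long a, b; };
 *
 *	static int fetch_args(struct args *ka, const struct args __user *ua)
 *	{
 *		if (copy_from_user(ka, ua, sizeof(*ka)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */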
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else /* security hole - plug it */
		memset(to, 0, n);
	return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */