#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)						\
	((unsigned long __force)(addr) <			\
	 (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
 *
 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
 */

#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "rm" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
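
/*
 * Usage sketch (illustrative only, not part of this header): a typical
 * caller validates the whole user buffer once before touching it, e.g.
 * in an ioctl handler ("ubuf" and "len" are hypothetical names):
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	... safe to use __put_user()/__copy_to_user() on ubuf here ...
 *
 * As noted above, this only checks the range; the access itself may
 * still fault and return -EFAULT.
 */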

/*
 * The exception table consists of pairs of addresses relative to the
 * exception table entry itself: the first is the address of an
 * instruction that is allowed to fault, and the second is the address
 * at which the program should continue.  No registers are modified,
 * so it is entirely up to the continuation code to figure out what to
 * do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	int insn, fixup;
};
/* This is not the generic standard exception_table_entry format */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

extern int fixup_exception(struct pt_regs *regs);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_x(size, ret, x, ptr)			\
	asm volatile("call __get_user_" #size		\
		     : "=a" (ret), "=d" (x)		\
		     : "0" (ptr))

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#ifdef CONFIG_X86_32
#define __get_user_8(__ret_gu, __val_gu, ptr)		\
		__get_user_x(X, __ret_gu, __val_gu, ptr)
#else
#define __get_user_8(__ret_gu, __val_gu, ptr)		\
		__get_user_x(8, __ret_gu, __val_gu, ptr)
#endif

#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	unsigned long __val_gu;						\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_8(__ret_gu, __val_gu, ptr);			\
		break;							\
	default:							\
		__get_user_x(X, __ret_gu, __val_gu, ptr);		\
		break;							\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
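
/*
 * Usage sketch (illustrative only, not part of this header): reading one
 * int from user space in a syscall; "uptr" is a hypothetical
 * "int __user *".  The access size is picked from the pointer type:
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */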

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")


#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  clobbers %ebx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

#ifdef CONFIG_X86_WP_WORKS_OK

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__ret_pu;						\
})
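
/*
 * Usage sketch (illustrative only, not part of this header): writing a
 * result back through a user pointer; "uresult" and "retval" are
 * hypothetical names.  As with get_user(), the access size comes from
 * the pointer type:
 *
 *	if (put_user(retval, uresult))
 *		return -EFAULT;
 */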

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#else

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	__typeof__(*(ptr))__pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr))__pus_tmp = x;			\
	__ret_pu = 0;						\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
				sizeof(*(ptr))) != 0))		\
		__ret_pu = -EFAULT;				\
	__ret_pu;						\
})
#endif

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)		\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	int prev_err = current_thread_info()->uaccess_err;		\
	current_thread_info()->uaccess_err = 0;				\
	barrier();

#define uaccess_catch(err)						\
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
	current_thread_info()->uaccess_err = prev_err;			\
} while (0)
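
/*
 * Illustrative sketch (not part of this header): uaccess_try/uaccess_catch
 * are the building blocks behind the {get|put}_user_try/catch macros
 * below.  A fault inside the block is recorded in
 * thread_info->uaccess_err rather than being reported per access, and
 * is collected once at the end:
 *
 *	uaccess_try {
 *		... _ex accesses that may fault ...
 *	} uaccess_catch(err);	// err |= -EFAULT if anything faulted
 */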

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#ifdef CONFIG_X86_WP_WORKS_OK

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#else /* !CONFIG_X86_WP_WORKS_OK */

#define put_user_try		do {		\
	int __uaccess_err = 0;

#define put_user_catch(err)			\
	(err) |= __uaccess_err;			\
} while (0)

#define put_user_ex(x, ptr)	do {		\
	__uaccess_err |= __put_user(x, ptr);	\
} while (0)

#endif /* CONFIG_X86_WP_WORKS_OK */
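
/*
 * Usage sketch (illustrative only, not part of this header): batching
 * several accesses so the fault check happens once at the end, in the
 * style of signal frame setup; "frame" is a hypothetical user pointer
 * and the field names are made up:
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(regs->ip, &frame->ip);
 *		put_user_ex(regs->sp, &frame->sp);
 *	} put_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */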

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include "uaccess_32.h"
#else
# include "uaccess_64.h"
#endif

#endif /* _ASM_X86_UACCESS_H */