#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr) 	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
 *
 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
 */

#define __range_not_ok(addr, size, limit)				\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "rm" (limit));						\
	flag;								\
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
	(likely(__range_not_ok(addr, size, user_addr_max()) == 0))
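/*
 * Example (an illustrative sketch, not part of this header; the function
 * name is made up): validate a whole user buffer once with access_ok(),
 * then use the unchecked __get_user() (defined below) for the individual
 * accesses. access_ok() only checks the range, so each access can still
 * fault and must have its return value checked.
 *
 *	static int sum_user_ints(const int __user *uarr, unsigned int n,
 *				 int *sum)
 *	{
 *		unsigned int i;
 *		int v, s = 0;
 *
 *		if (!access_ok(VERIFY_READ, uarr, n * sizeof(*uarr)))
 *			return -EFAULT;
 *		for (i = 0; i < n; i++) {
 *			if (__get_user(v, uarr + i))
 *				return -EFAULT;
 *			s += v;
 *		}
 *		*sum = s;
 *		return 0;
 *	}
 */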
/*
 * The exception table consists of pairs of addresses relative to the
 * exception table entry itself: the first is the address of an
 * instruction that is allowed to fault, and the second is the address
 * at which the program should continue. No registers are modified,
 * so it is entirely up to the continuation code to figure out what to
 * do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	int insn, fixup;
};
/* This is not the generic standard exception_table_entry format */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

extern int fixup_exception(struct pt_regs *regs);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of %edx as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%edx");		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P3"				\
		     : "=a" (__ret_gu), "=r" (__val_gu)			\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__typeof__(*(ptr))) __val_gu;				\
	__ret_gu;							\
})
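/*
 * Example (an illustrative sketch; the names are made up): fetching one
 * value through a user pointer, as a syscall taking an int __user *
 * argument would. get_user() performs the range check itself, so no
 * separate access_ok() call is needed.
 *
 *	static int fetch_flags(const int __user *uflags, int *kflags)
 *	{
 *		int val;
 *
 *		if (get_user(val, uflags))
 *			return -EFAULT;
 *		*kflags = val;
 *		return 0;
 *	}
 */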
#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile(ASM_STAC "\n"					\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile(ASM_STAC "\n"					\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3: " ASM_CLAC "\n"				\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);	\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__ret_pu;						\
})
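/*
 * Example (an illustrative sketch; the names are made up): writing a
 * result back to user space. put_user() already returns 0 or -EFAULT,
 * so the result can be returned directly. Note the default case above
 * emits a call to the nonexistent __put_user_X, turning an unsupported
 * operand size into a link-time error.
 *
 *	static int report_status(int status, int __user *ustatus)
 *	{
 *		return put_user(status, ustatus);
 *	}
 */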
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile(ASM_STAC "\n"					\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)
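/*
 * Illustration (a rough sketch of what __get_user_asm above generates,
 * not verbatim compiler output): for the 4-byte case a single movl is
 * emitted, and the exception table entry maps a fault at 1: to the
 * out-of-line fixup at 3:, which stores errret in err, zeroes the
 * destination register, and resumes on the normal path at 2:.
 *
 *	ASM_STAC
 *	1:	movl (addr),%reg	# recorded in __ex_table
 *	2:	ASM_CLAC		# normal path continues here
 *	.section .fixup,"ax"
 *	3:	mov $-EFAULT,%err
 *		xorl %reg,%reg
 *		jmp 2b
 *	.previous
 */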
#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile(ASM_STAC "\n"					\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current_thread_info()->uaccess_err = 0;				\
	stac();								\
	barrier();

#define uaccess_catch(err)						\
	clac();								\
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
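/*
 * Fuller example of the try/catch pattern above (an illustrative sketch;
 * the structure and function are made up): batch several user reads
 * under one region instead of testing each access individually. Faults
 * accumulate in uaccess_err and surface once through err in the catch.
 *
 *	struct frame { int a; long b; };
 *
 *	static int read_frame(const struct frame __user *uf,
 *			      struct frame *kf)
 *	{
 *		int err = 0;
 *
 *		if (!access_ok(VERIFY_READ, uf, sizeof(*uf)))
 *			return -EFAULT;
 *		get_user_try {
 *			get_user_ex(kf->a, &uf->a);
 *			get_user_ex(kf->b, &uf->b);
 *		} get_user_catch(err);
 *		return err;
 *	}
 */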
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

#endif /* _ASM_X86_UACCESS_H */