1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _ASM_X86_UACCESS_H 3 #define _ASM_X86_UACCESS_H 4 /* 5 * User space memory access functions 6 */ 7 #include <linux/compiler.h> 8 #include <linux/kasan-checks.h> 9 #include <linux/string.h> 10 #include <asm/asm.h> 11 #include <asm/page.h> 12 #include <asm/smap.h> 13 #include <asm/extable.h> 14 15 /* 16 * The fs value determines whether argument validity checking should be 17 * performed or not. If get_fs() == USER_DS, checking is performed, with 18 * get_fs() == KERNEL_DS, checking is bypassed. 19 * 20 * For historical reasons, these macros are grossly misnamed. 21 */ 22 23 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) 24 25 #define KERNEL_DS MAKE_MM_SEG(-1UL) 26 #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX) 27 28 #define get_ds() (KERNEL_DS) 29 #define get_fs() (current->thread.addr_limit) 30 static inline void set_fs(mm_segment_t fs) 31 { 32 current->thread.addr_limit = fs; 33 /* On user-mode return, check fs is correct */ 34 set_thread_flag(TIF_FSCHECK); 35 } 36 37 #define segment_eq(a, b) ((a).seg == (b).seg) 38 39 #define user_addr_max() (current->thread.addr_limit.seg) 40 #define __addr_ok(addr) \ 41 ((unsigned long __force)(addr) < user_addr_max()) 42 43 /* 44 * Test whether a block of memory is a valid user space address. 45 * Returns 0 if the range is valid, nonzero otherwise. 46 */ 47 static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit) 48 { 49 /* 50 * If we have used "sizeof()" for the size, 51 * we know it won't overflow the limit (but 52 * it might overflow the 'addr', so it's 53 * important to subtract the size from the 54 * limit, not add it to the address). 55 */ 56 if (__builtin_constant_p(size)) 57 return unlikely(addr > limit - size); 58 59 /* Arbitrary sizes? Be careful about overflow */ 60 addr += size; 61 if (unlikely(addr < size)) 62 return true; 63 return unlikely(addr > limit); 64 } 65 66 #define __range_not_ok(addr, size, limit) \ 67 ({ \ 68 __chk_user_ptr(addr); \ 69 __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ 70 }) 71 72 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 73 # define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task()) 74 #else 75 # define WARN_ON_IN_IRQ() 76 #endif 77 78 /** 79 * access_ok: - Checks if a user space pointer is valid 80 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that 81 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe 82 * to write to a block, it is always safe to read from it. 83 * @addr: User space pointer to start of block to check 84 * @size: Size of block to check 85 * 86 * Context: User context only. This function may sleep if pagefaults are 87 * enabled. 88 * 89 * Checks if a pointer to a block of memory in user space is valid. 90 * 91 * Returns true (nonzero) if the memory block may be valid, false (zero) 92 * if it is definitely invalid. 93 * 94 * Note that, depending on architecture, this function probably just 95 * checks that the pointer is in the user space range - after calling 96 * this function, memory access functions may still return -EFAULT. 97 */ 98 #define access_ok(type, addr, size) \ 99 ({ \ 100 WARN_ON_IN_IRQ(); \ 101 likely(!__range_not_ok(addr, size, user_addr_max())); \ 102 }) 103 104 /* 105 * These are the main single-value transfer routines. They automatically 106 * use the right size if we just have the right pointer type. 107 * 108 * This gets kind of ugly. We want to return _two_ values in "get_user()" 109 * and yet we don't want to do any pointers, because that is too much 110 * of a performance impact. Thus we have a few rather ugly macros here, 111 * and hide all the ugliness from the user. 112 * 113 * The "__xxx" versions of the user access functions are versions that 114 * do not verify the address space, that must have been done previously 115 * with a separate "access_ok()" call (this is used when we do multiple 116 * accesses to the same area of user memory). 117 */ 118 119 extern int __get_user_1(void); 120 extern int __get_user_2(void); 121 extern int __get_user_4(void); 122 extern int __get_user_8(void); 123 extern int __get_user_bad(void); 124 125 #define __uaccess_begin() stac() 126 #define __uaccess_end() clac() 127 128 /* 129 * This is a type: either unsigned long, if the argument fits into 130 * that type, or otherwise unsigned long long. 131 */ 132 #define __inttype(x) \ 133 __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) 134 135 /** 136 * get_user: - Get a simple variable from user space. 137 * @x: Variable to store result. 138 * @ptr: Source address, in user space. 139 * 140 * Context: User context only. This function may sleep if pagefaults are 141 * enabled. 142 * 143 * This macro copies a single simple variable from user space to kernel 144 * space. It supports simple types like char and int, but not larger 145 * data types like structures or arrays. 146 * 147 * @ptr must have pointer-to-simple-variable type, and the result of 148 * dereferencing @ptr must be assignable to @x without a cast. 149 * 150 * Returns zero on success, or -EFAULT on error. 151 * On error, the variable @x is set to zero. 152 */ 153 /* 154 * Careful: we have to cast the result to the type of the pointer 155 * for sign reasons. 156 * 157 * The use of _ASM_DX as the register specifier is a bit of a 158 * simplification, as gcc only cares about it as the starting point 159 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits 160 * (%ecx being the next register in gcc's x86 register sequence), and 161 * %rdx on 64 bits. 162 * 163 * Clang/LLVM cares about the size of the register, but still wants 164 * the base register for something that ends up being a pair. 165 */ 166 #define get_user(x, ptr) \ 167 ({ \ 168 int __ret_gu; \ 169 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ 170 __chk_user_ptr(ptr); \ 171 might_fault(); \ 172 asm volatile("call __get_user_%P4" \ 173 : "=a" (__ret_gu), "=r" (__val_gu), \ 174 ASM_CALL_CONSTRAINT \ 175 : "0" (ptr), "i" (sizeof(*(ptr)))); \ 176 (x) = (__force __typeof__(*(ptr))) __val_gu; \ 177 __builtin_expect(__ret_gu, 0); \ 178 }) 179 180 #define __put_user_x(size, x, ptr, __ret_pu) \ 181 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ 182 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 183 184 185 186 #ifdef CONFIG_X86_32 187 #define __put_user_asm_u64(x, addr, err, errret) \ 188 asm volatile("\n" \ 189 "1: movl %%eax,0(%2)\n" \ 190 "2: movl %%edx,4(%2)\n" \ 191 "3:" \ 192 ".section .fixup,\"ax\"\n" \ 193 "4: movl %3,%0\n" \ 194 " jmp 3b\n" \ 195 ".previous\n" \ 196 _ASM_EXTABLE(1b, 4b) \ 197 _ASM_EXTABLE(2b, 4b) \ 198 : "=r" (err) \ 199 : "A" (x), "r" (addr), "i" (errret), "0" (err)) 200 201 #define __put_user_asm_ex_u64(x, addr) \ 202 asm volatile("\n" \ 203 "1: movl %%eax,0(%1)\n" \ 204 "2: movl %%edx,4(%1)\n" \ 205 "3:" \ 206 _ASM_EXTABLE_EX(1b, 2b) \ 207 _ASM_EXTABLE_EX(2b, 3b) \ 208 : : "A" (x), "r" (addr)) 209 210 #define __put_user_x8(x, ptr, __ret_pu) \ 211 asm volatile("call __put_user_8" : "=a" (__ret_pu) \ 212 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 213 #else 214 #define __put_user_asm_u64(x, ptr, retval, errret) \ 215 __put_user_asm(x, ptr, retval, "q", "", "er", errret) 216 #define __put_user_asm_ex_u64(x, addr) \ 217 __put_user_asm_ex(x, addr, "q", "", "er") 218 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) 219 #endif 220 221 extern void __put_user_bad(void); 222 223 /* 224 * Strange magic calling convention: pointer in %ecx, 225 * value in %eax(:%edx), return value in %eax. clobbers %rbx 226 */ 227 extern void __put_user_1(void); 228 extern void __put_user_2(void); 229 extern void __put_user_4(void); 230 extern void __put_user_8(void); 231 232 /** 233 * put_user: - Write a simple value into user space. 234 * @x: Value to copy to user space. 235 * @ptr: Destination address, in user space. 236 * 237 * Context: User context only. This function may sleep if pagefaults are 238 * enabled. 239 * 240 * This macro copies a single simple value from kernel space to user 241 * space. It supports simple types like char and int, but not larger 242 * data types like structures or arrays. 243 * 244 * @ptr must have pointer-to-simple-variable type, and @x must be assignable 245 * to the result of dereferencing @ptr. 246 * 247 * Returns zero on success, or -EFAULT on error. 248 */ 249 #define put_user(x, ptr) \ 250 ({ \ 251 int __ret_pu; \ 252 __typeof__(*(ptr)) __pu_val; \ 253 __chk_user_ptr(ptr); \ 254 might_fault(); \ 255 __pu_val = x; \ 256 switch (sizeof(*(ptr))) { \ 257 case 1: \ 258 __put_user_x(1, __pu_val, ptr, __ret_pu); \ 259 break; \ 260 case 2: \ 261 __put_user_x(2, __pu_val, ptr, __ret_pu); \ 262 break; \ 263 case 4: \ 264 __put_user_x(4, __pu_val, ptr, __ret_pu); \ 265 break; \ 266 case 8: \ 267 __put_user_x8(__pu_val, ptr, __ret_pu); \ 268 break; \ 269 default: \ 270 __put_user_x(X, __pu_val, ptr, __ret_pu); \ 271 break; \ 272 } \ 273 __builtin_expect(__ret_pu, 0); \ 274 }) 275 276 #define __put_user_size(x, ptr, size, retval, errret) \ 277 do { \ 278 retval = 0; \ 279 __chk_user_ptr(ptr); \ 280 switch (size) { \ 281 case 1: \ 282 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \ 283 break; \ 284 case 2: \ 285 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ 286 break; \ 287 case 4: \ 288 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ 289 break; \ 290 case 8: \ 291 __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \ 292 errret); \ 293 break; \ 294 default: \ 295 __put_user_bad(); \ 296 } \ 297 } while (0) 298 299 /* 300 * This doesn't do __uaccess_begin/end - the exception handling 301 * around it must do that. 302 */ 303 #define __put_user_size_ex(x, ptr, size) \ 304 do { \ 305 __chk_user_ptr(ptr); \ 306 switch (size) { \ 307 case 1: \ 308 __put_user_asm_ex(x, ptr, "b", "b", "iq"); \ 309 break; \ 310 case 2: \ 311 __put_user_asm_ex(x, ptr, "w", "w", "ir"); \ 312 break; \ 313 case 4: \ 314 __put_user_asm_ex(x, ptr, "l", "k", "ir"); \ 315 break; \ 316 case 8: \ 317 __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \ 318 break; \ 319 default: \ 320 __put_user_bad(); \ 321 } \ 322 } while (0) 323 324 #ifdef CONFIG_X86_32 325 #define __get_user_asm_u64(x, ptr, retval, errret) \ 326 ({ \ 327 __typeof__(ptr) __ptr = (ptr); \ 328 asm volatile("\n" \ 329 "1: movl %2,%%eax\n" \ 330 "2: movl %3,%%edx\n" \ 331 "3:\n" \ 332 ".section .fixup,\"ax\"\n" \ 333 "4: mov %4,%0\n" \ 334 " xorl %%eax,%%eax\n" \ 335 " xorl %%edx,%%edx\n" \ 336 " jmp 3b\n" \ 337 ".previous\n" \ 338 _ASM_EXTABLE(1b, 4b) \ 339 _ASM_EXTABLE(2b, 4b) \ 340 : "=r" (retval), "=&A"(x) \ 341 : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \ 342 "i" (errret), "0" (retval)); \ 343 }) 344 345 #define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad() 346 #else 347 #define __get_user_asm_u64(x, ptr, retval, errret) \ 348 __get_user_asm(x, ptr, retval, "q", "", "=r", errret) 349 #define __get_user_asm_ex_u64(x, ptr) \ 350 __get_user_asm_ex(x, ptr, "q", "", "=r") 351 #endif 352 353 #define __get_user_size(x, ptr, size, retval, errret) \ 354 do { \ 355 retval = 0; \ 356 __chk_user_ptr(ptr); \ 357 switch (size) { \ 358 case 1: \ 359 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \ 360 break; \ 361 case 2: \ 362 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \ 363 break; \ 364 case 4: \ 365 __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \ 366 break; \ 367 case 8: \ 368 __get_user_asm_u64(x, ptr, retval, errret); \ 369 break; \ 370 default: \ 371 (x) = __get_user_bad(); \ 372 } \ 373 } while (0) 374 375 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ 376 asm volatile("\n" \ 377 "1: mov"itype" %2,%"rtype"1\n" \ 378 "2:\n" \ 379 ".section .fixup,\"ax\"\n" \ 380 "3: mov %3,%0\n" \ 381 " xor"itype" %"rtype"1,%"rtype"1\n" \ 382 " jmp 2b\n" \ 383 ".previous\n" \ 384 _ASM_EXTABLE(1b, 3b) \ 385 : "=r" (err), ltype(x) \ 386 : "m" (__m(addr)), "i" (errret), "0" (err)) 387 388 #define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \ 389 asm volatile("\n" \ 390 "1: mov"itype" %2,%"rtype"1\n" \ 391 "2:\n" \ 392 ".section .fixup,\"ax\"\n" \ 393 "3: mov %3,%0\n" \ 394 " jmp 2b\n" \ 395 ".previous\n" \ 396 _ASM_EXTABLE(1b, 3b) \ 397 : "=r" (err), ltype(x) \ 398 : "m" (__m(addr)), "i" (errret), "0" (err)) 399 400 /* 401 * This doesn't do __uaccess_begin/end - the exception handling 402 * around it must do that. 403 */ 404 #define __get_user_size_ex(x, ptr, size) \ 405 do { \ 406 __chk_user_ptr(ptr); \ 407 switch (size) { \ 408 case 1: \ 409 __get_user_asm_ex(x, ptr, "b", "b", "=q"); \ 410 break; \ 411 case 2: \ 412 __get_user_asm_ex(x, ptr, "w", "w", "=r"); \ 413 break; \ 414 case 4: \ 415 __get_user_asm_ex(x, ptr, "l", "k", "=r"); \ 416 break; \ 417 case 8: \ 418 __get_user_asm_ex_u64(x, ptr); \ 419 break; \ 420 default: \ 421 (x) = __get_user_bad(); \ 422 } \ 423 } while (0) 424 425 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ 426 asm volatile("1: mov"itype" %1,%"rtype"0\n" \ 427 "2:\n" \ 428 ".section .fixup,\"ax\"\n" \ 429 "3:xor"itype" %"rtype"0,%"rtype"0\n" \ 430 " jmp 2b\n" \ 431 ".previous\n" \ 432 _ASM_EXTABLE_EX(1b, 3b) \ 433 : ltype(x) : "m" (__m(addr))) 434 435 #define __put_user_nocheck(x, ptr, size) \ 436 ({ \ 437 int __pu_err; \ 438 __uaccess_begin(); \ 439 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ 440 __uaccess_end(); \ 441 __builtin_expect(__pu_err, 0); \ 442 }) 443 444 #define __get_user_nocheck(x, ptr, size) \ 445 ({ \ 446 int __gu_err; \ 447 __inttype(*(ptr)) __gu_val; \ 448 __uaccess_begin(); \ 449 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ 450 __uaccess_end(); \ 451 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 452 __builtin_expect(__gu_err, 0); \ 453 }) 454 455 /* FIXME: this hack is definitely wrong -AK */ 456 struct __large_struct { unsigned long buf[100]; }; 457 #define __m(x) (*(struct __large_struct __user *)(x)) 458 459 /* 460 * Tell gcc we read from memory instead of writing: this is because 461 * we do not write to any memory gcc knows about, so there are no 462 * aliasing issues. 463 */ 464 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ 465 asm volatile("\n" \ 466 "1: mov"itype" %"rtype"1,%2\n" \ 467 "2:\n" \ 468 ".section .fixup,\"ax\"\n" \ 469 "3: mov %3,%0\n" \ 470 " jmp 2b\n" \ 471 ".previous\n" \ 472 _ASM_EXTABLE(1b, 3b) \ 473 : "=r"(err) \ 474 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) 475 476 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ 477 asm volatile("1: mov"itype" %"rtype"0,%1\n" \ 478 "2:\n" \ 479 _ASM_EXTABLE_EX(1b, 2b) \ 480 : : ltype(x), "m" (__m(addr))) 481 482 /* 483 * uaccess_try and catch 484 */ 485 #define uaccess_try do { \ 486 current->thread.uaccess_err = 0; \ 487 __uaccess_begin(); \ 488 barrier(); 489 490 #define uaccess_catch(err) \ 491 __uaccess_end(); \ 492 (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \ 493 } while (0) 494 495 /** 496 * __get_user: - Get a simple variable from user space, with less checking. 497 * @x: Variable to store result. 498 * @ptr: Source address, in user space. 499 * 500 * Context: User context only. This function may sleep if pagefaults are 501 * enabled. 502 * 503 * This macro copies a single simple variable from user space to kernel 504 * space. It supports simple types like char and int, but not larger 505 * data types like structures or arrays. 506 * 507 * @ptr must have pointer-to-simple-variable type, and the result of 508 * dereferencing @ptr must be assignable to @x without a cast. 509 * 510 * Caller must check the pointer with access_ok() before calling this 511 * function. 512 * 513 * Returns zero on success, or -EFAULT on error. 514 * On error, the variable @x is set to zero. 515 */ 516 517 #define __get_user(x, ptr) \ 518 __get_user_nocheck((x), (ptr), sizeof(*(ptr))) 519 520 /** 521 * __put_user: - Write a simple value into user space, with less checking. 522 * @x: Value to copy to user space. 523 * @ptr: Destination address, in user space. 524 * 525 * Context: User context only. This function may sleep if pagefaults are 526 * enabled. 527 * 528 * This macro copies a single simple value from kernel space to user 529 * space. It supports simple types like char and int, but not larger 530 * data types like structures or arrays. 531 * 532 * @ptr must have pointer-to-simple-variable type, and @x must be assignable 533 * to the result of dereferencing @ptr. 534 * 535 * Caller must check the pointer with access_ok() before calling this 536 * function. 537 * 538 * Returns zero on success, or -EFAULT on error. 539 */ 540 541 #define __put_user(x, ptr) \ 542 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 543 544 /* 545 * {get|put}_user_try and catch 546 * 547 * get_user_try { 548 * get_user_ex(...); 549 * } get_user_catch(err) 550 */ 551 #define get_user_try uaccess_try 552 #define get_user_catch(err) uaccess_catch(err) 553 554 #define get_user_ex(x, ptr) do { \ 555 unsigned long __gue_val; \ 556 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ 557 (x) = (__force __typeof__(*(ptr)))__gue_val; \ 558 } while (0) 559 560 #define put_user_try uaccess_try 561 #define put_user_catch(err) uaccess_catch(err) 562 563 #define put_user_ex(x, ptr) \ 564 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 565 566 extern unsigned long 567 copy_from_user_nmi(void *to, const void __user *from, unsigned long n); 568 extern __must_check long 569 strncpy_from_user(char *dst, const char __user *src, long count); 570 571 extern __must_check long strnlen_user(const char __user *str, long n); 572 573 unsigned long __must_check clear_user(void __user *mem, unsigned long len); 574 unsigned long __must_check __clear_user(void __user *mem, unsigned long len); 575 576 extern void __cmpxchg_wrong_size(void) 577 __compiletime_error("Bad argument size for cmpxchg"); 578 579 #define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \ 580 ({ \ 581 int __ret = 0; \ 582 __typeof__(ptr) __uval = (uval); \ 583 __typeof__(*(ptr)) __old = (old); \ 584 __typeof__(*(ptr)) __new = (new); \ 585 __uaccess_begin(); \ 586 switch (size) { \ 587 case 1: \ 588 { \ 589 asm volatile("\n" \ 590 "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \ 591 "2:\n" \ 592 "\t.section .fixup, \"ax\"\n" \ 593 "3:\tmov %3, %0\n" \ 594 "\tjmp 2b\n" \ 595 "\t.previous\n" \ 596 _ASM_EXTABLE(1b, 3b) \ 597 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 598 : "i" (-EFAULT), "q" (__new), "1" (__old) \ 599 : "memory" \ 600 ); \ 601 break; \ 602 } \ 603 case 2: \ 604 { \ 605 asm volatile("\n" \ 606 "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \ 607 "2:\n" \ 608 "\t.section .fixup, \"ax\"\n" \ 609 "3:\tmov %3, %0\n" \ 610 "\tjmp 2b\n" \ 611 "\t.previous\n" \ 612 _ASM_EXTABLE(1b, 3b) \ 613 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 614 : "i" (-EFAULT), "r" (__new), "1" (__old) \ 615 : "memory" \ 616 ); \ 617 break; \ 618 } \ 619 case 4: \ 620 { \ 621 asm volatile("\n" \ 622 "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \ 623 "2:\n" \ 624 "\t.section .fixup, \"ax\"\n" \ 625 "3:\tmov %3, %0\n" \ 626 "\tjmp 2b\n" \ 627 "\t.previous\n" \ 628 _ASM_EXTABLE(1b, 3b) \ 629 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 630 : "i" (-EFAULT), "r" (__new), "1" (__old) \ 631 : "memory" \ 632 ); \ 633 break; \ 634 } \ 635 case 8: \ 636 { \ 637 if (!IS_ENABLED(CONFIG_X86_64)) \ 638 __cmpxchg_wrong_size(); \ 639 \ 640 asm volatile("\n" \ 641 "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \ 642 "2:\n" \ 643 "\t.section .fixup, \"ax\"\n" \ 644 "3:\tmov %3, %0\n" \ 645 "\tjmp 2b\n" \ 646 "\t.previous\n" \ 647 _ASM_EXTABLE(1b, 3b) \ 648 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 649 : "i" (-EFAULT), "r" (__new), "1" (__old) \ 650 : "memory" \ 651 ); \ 652 break; \ 653 } \ 654 default: \ 655 __cmpxchg_wrong_size(); \ 656 } \ 657 __uaccess_end(); \ 658 *__uval = __old; \ 659 __ret; \ 660 }) 661 662 #define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \ 663 ({ \ 664 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ 665 __user_atomic_cmpxchg_inatomic((uval), (ptr), \ 666 (old), (new), sizeof(*(ptr))) : \ 667 -EFAULT; \ 668 }) 669 670 /* 671 * movsl can be slow when source and dest are not both 8-byte aligned 672 */ 673 #ifdef CONFIG_X86_INTEL_USERCOPY 674 extern struct movsl_mask { 675 int mask; 676 } ____cacheline_aligned_in_smp movsl_mask; 677 #endif 678 679 #define ARCH_HAS_NOCACHE_UACCESS 1 680 681 #ifdef CONFIG_X86_32 682 # include <asm/uaccess_32.h> 683 #else 684 # include <asm/uaccess_64.h> 685 #endif 686 687 /* 688 * We rely on the nested NMI work to allow atomic faults from the NMI path; the 689 * nested NMI paths are careful to preserve CR2. 690 * 691 * Caller must use pagefault_enable/disable, or run in interrupt context, 692 * and also do a uaccess_ok() check 693 */ 694 #define __copy_from_user_nmi __copy_from_user_inatomic 695 696 /* 697 * The "unsafe" user accesses aren't really "unsafe", but the naming 698 * is a big fat warning: you have to not only do the access_ok() 699 * checking before using them, but you have to surround them with the 700 * user_access_begin/end() pair. 701 */ 702 #define user_access_begin() __uaccess_begin() 703 #define user_access_end() __uaccess_end() 704 705 #define unsafe_put_user(x, ptr, err_label) \ 706 do { \ 707 int __pu_err; \ 708 __typeof__(*(ptr)) __pu_val = (x); \ 709 __put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ 710 if (unlikely(__pu_err)) goto err_label; \ 711 } while (0) 712 713 #define unsafe_get_user(x, ptr, err_label) \ 714 do { \ 715 int __gu_err; \ 716 __inttype(*(ptr)) __gu_val; \ 717 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ 718 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 719 if (unlikely(__gu_err)) goto err_label; \ 720 } while (0) 721 722 #endif /* _ASM_X86_UACCESS_H */ 723 724