/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
        current->thread.addr_limit = fs;
        /* On user-mode return, check fs is correct */
        set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr) \
        ((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size,
         * we know it won't overflow the limit (but
         * it might overflow the 'addr', so it's
         * important to subtract the size from the
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit) \
({ \
        __chk_user_ptr(addr); \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()       WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size) \
({ \
        WARN_ON_IN_IRQ(); \
        likely(!__range_not_ok(addr, size, user_addr_max())); \
})
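
/*
 * Example (illustrative only; the function and variable names below are
 * made up): check the whole range once with access_ok() and then use the
 * "__"-prefixed accessors defined later in this header.  access_ok() only
 * validates the range, so the access itself may still return -EFAULT.
 *
 *	static int example_read_flags(unsigned int __user *uflags,
 *	                              unsigned int *kflags)
 *	{
 *	        if (!access_ok(uflags, sizeof(*uflags)))
 *	                return -EFAULT;
 *	        return __get_user(*kflags, uflags);
 *	}
 */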

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec() \
({ \
        stac(); \
        barrier_nospec(); \
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr) \
({ \
        int __ret_gu; \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
        __chk_user_ptr(ptr); \
        might_fault(); \
        asm volatile("call __get_user_%P4" \
                     : "=a" (__ret_gu), "=r" (__val_gu), \
                       ASM_CALL_CONSTRAINT \
                     : "0" (ptr), "i" (sizeof(*(ptr)))); \
        (x) = (__force __typeof__(*(ptr))) __val_gu; \
        __builtin_expect(__ret_gu, 0); \
})
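
/*
 * Example (illustrative only; names are made up): get_user() checks the
 * pointer and fetches the value in one step, so no separate access_ok()
 * call is needed.
 *
 *	static int example_fetch_int(int __user *uptr, int *out)
 *	{
 *	        int val;
 *
 *	        if (get_user(val, uptr))
 *	                return -EFAULT;
 *	        *out = val;
 *	        return 0;
 *	}
 */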

#define __put_user_x(size, x, ptr, __ret_pu) \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label) \
        asm_volatile_goto("\n" \
                          "1:     movl %%eax,0(%1)\n" \
                          "2:     movl %%edx,4(%1)\n" \
                          _ASM_EXTABLE_UA(1b, %l2) \
                          _ASM_EXTABLE_UA(2b, %l2) \
                          : : "A" (x), "r" (addr) \
                          : : label)

#define __put_user_asm_ex_u64(x, addr) \
        asm volatile("\n" \
                     "1:     movl %%eax,0(%1)\n" \
                     "2:     movl %%edx,4(%1)\n" \
                     "3:" \
                     _ASM_EXTABLE_EX(1b, 2b) \
                     _ASM_EXTABLE_EX(2b, 3b) \
                     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu) \
        asm volatile("call __put_user_8" : "=a" (__ret_pu) \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
        __put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr) \
        __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
({ \
        int __ret_pu; \
        __typeof__(*(ptr)) __pu_val; \
        __chk_user_ptr(ptr); \
        might_fault(); \
        __pu_val = x; \
        switch (sizeof(*(ptr))) { \
        case 1: \
                __put_user_x(1, __pu_val, ptr, __ret_pu); \
                break; \
        case 2: \
                __put_user_x(2, __pu_val, ptr, __ret_pu); \
                break; \
        case 4: \
                __put_user_x(4, __pu_val, ptr, __ret_pu); \
                break; \
        case 8: \
                __put_user_x8(__pu_val, ptr, __ret_pu); \
                break; \
        default: \
                __put_user_x(X, __pu_val, ptr, __ret_pu); \
                break; \
        } \
        __builtin_expect(__ret_pu, 0); \
})
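
/*
 * Example (illustrative only; names are made up): put_user() checks the
 * pointer and performs the store in one step.
 *
 *	static int example_store_result(unsigned long result,
 *	                                unsigned long __user *uresult)
 *	{
 *	        return put_user(result, uresult);
 *	}
 */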

#define __put_user_size(x, ptr, size, label) \
do { \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __put_user_goto(x, ptr, "b", "b", "iq", label); \
                break; \
        case 2: \
                __put_user_goto(x, ptr, "w", "w", "ir", label); \
                break; \
        case 4: \
                __put_user_goto(x, ptr, "l", "k", "ir", label); \
                break; \
        case 8: \
                __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \
                break; \
        default: \
                __put_user_bad(); \
        } \
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size) \
do { \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
                break; \
        case 2: \
                __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
                break; \
        case 4: \
                __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
                break; \
        case 8: \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
                break; \
        default: \
                __put_user_bad(); \
        } \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) \
({ \
        __typeof__(ptr) __ptr = (ptr); \
        asm volatile("\n" \
                     "1:     movl %2,%%eax\n" \
                     "2:     movl %3,%%edx\n" \
                     "3:\n" \
                     ".section .fixup,\"ax\"\n" \
                     "4:     mov %4,%0\n" \
                     "       xorl %%eax,%%eax\n" \
                     "       xorl %%edx,%%edx\n" \
                     "       jmp 3b\n" \
                     ".previous\n" \
                     _ASM_EXTABLE_UA(1b, 4b) \
                     _ASM_EXTABLE_UA(2b, 4b) \
                     : "=r" (retval), "=&A"(x) \
                     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
                       "i" (errret), "0" (retval)); \
})

#define __get_user_asm_ex_u64(x, ptr)   (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
        __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
        __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret) \
do { \
        retval = 0; \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break; \
        case 2: \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break; \
        case 4: \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break; \
        case 8: \
                __get_user_asm_u64(x, ptr, retval, errret); \
                break; \
        default: \
                (x) = __get_user_bad(); \
        } \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
        asm volatile("\n" \
                     "1:     mov"itype" %2,%"rtype"1\n" \
                     "2:\n" \
                     ".section .fixup,\"ax\"\n" \
                     "3:     mov %3,%0\n" \
                     "       xor"itype" %"rtype"1,%"rtype"1\n" \
                     "       jmp 2b\n" \
                     ".previous\n" \
                     _ASM_EXTABLE_UA(1b, 3b) \
                     : "=r" (err), ltype(x) \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
        asm volatile("\n" \
                     "1:     mov"itype" %2,%"rtype"1\n" \
                     "2:\n" \
                     ".section .fixup,\"ax\"\n" \
                     "3:     mov %3,%0\n" \
                     "       jmp 2b\n" \
                     ".previous\n" \
                     _ASM_EXTABLE_UA(1b, 3b) \
                     : "=r" (err), ltype(x) \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size) \
do { \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
                break; \
        case 2: \
                __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
                break; \
        case 4: \
                __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
                break; \
        case 8: \
                __get_user_asm_ex_u64(x, ptr); \
                break; \
        default: \
                (x) = __get_user_bad(); \
        } \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
        asm volatile("1:     mov"itype" %1,%"rtype"0\n" \
                     "2:\n" \
                     ".section .fixup,\"ax\"\n" \
                     "3:xor"itype" %"rtype"0,%"rtype"0\n" \
                     "  jmp 2b\n" \
                     ".previous\n" \
                     _ASM_EXTABLE_EX(1b, 3b) \
                     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size) \
({ \
        __label__ __pu_label; \
        int __pu_err = -EFAULT; \
        __uaccess_begin(); \
        __put_user_size((x), (ptr), (size), __pu_label); \
        __pu_err = 0; \
__pu_label: \
        __uaccess_end(); \
        __builtin_expect(__pu_err, 0); \
})

#define __get_user_nocheck(x, ptr, size) \
({ \
        int __gu_err; \
        __inttype(*(ptr)) __gu_val; \
        __uaccess_begin_nospec(); \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
        __uaccess_end(); \
        (x) = (__force __typeof__(*(ptr)))__gu_val; \
        __builtin_expect(__gu_err, 0); \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label) \
        asm_volatile_goto("\n" \
                          "1:     mov"itype" %"rtype"0,%1\n" \
                          _ASM_EXTABLE_UA(1b, %l2) \
                          : : ltype(x), "m" (__m(addr)) \
                          : : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret) \
({      __label__ __puflab; \
        int __pufret = errret; \
        __put_user_goto(x, addr, itype, rtype, ltype, __puflab); \
        __pufret = 0; \
__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)    do { \
        retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
        asm volatile("1:     mov"itype" %"rtype"0,%1\n" \
                     "2:\n" \
                     _ASM_EXTABLE_EX(1b, 2b) \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try     do { \
        current->thread.uaccess_err = 0; \
        __uaccess_begin(); \
        barrier();

#define uaccess_try_nospec do { \
        current->thread.uaccess_err = 0; \
        __uaccess_begin_nospec(); \

#define uaccess_catch(err) \
        __uaccess_end(); \
        (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
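
/*
 * Example (illustrative only; the struct and names are made up): when
 * several fields of the same user buffer are touched, one access_ok()
 * check followed by the "__"-prefixed accessors avoids repeating the
 * range check for every field.
 *
 *	struct example_pair { int a; int b; };
 *
 *	static int example_swap_pair(struct example_pair __user *up)
 *	{
 *	        int a, b;
 *
 *	        if (!access_ok(up, sizeof(*up)))
 *	                return -EFAULT;
 *	        if (__get_user(a, &up->a) || __get_user(b, &up->b))
 *	                return -EFAULT;
 *	        if (__put_user(b, &up->a) || __put_user(a, &up->b))
 *	                return -EFAULT;
 *	        return 0;
 *	}
 */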

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try            uaccess_try_nospec
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr)     do { \
        unsigned long __gue_val; \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
        (x) = (__force __typeof__(*(ptr)))__gue_val; \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr) \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
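
/*
 * Example (illustrative only; names are made up): the *_ex() accessors
 * may only appear between the try/catch markers, and the caller is
 * responsible for the access_ok() check since the try macros do not
 * verify the range.
 *
 *	static int example_read_two(int __user *uptr, int *a, int *b)
 *	{
 *	        int err = 0;
 *
 *	        if (!access_ok(uptr, 2 * sizeof(int)))
 *	                return -EFAULT;
 *	        get_user_try {
 *	                get_user_ex(*a, uptr);
 *	                get_user_ex(*b, uptr + 1);
 *	        } get_user_catch(err);
 *	        return err;
 *	}
 */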

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
({ \
        int __ret = 0; \
        __typeof__(ptr) __uval = (uval); \
        __typeof__(*(ptr)) __old = (old); \
        __typeof__(*(ptr)) __new = (new); \
        __uaccess_begin_nospec(); \
        switch (size) { \
        case 1: \
        { \
                asm volatile("\n" \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
                        "2:\n" \
                        "\t.section .fixup, \"ax\"\n" \
                        "3:\tmov %3, %0\n" \
                        "\tjmp 2b\n" \
                        "\t.previous\n" \
                        _ASM_EXTABLE_UA(1b, 3b) \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
                        : "i" (-EFAULT), "q" (__new), "1" (__old) \
                        : "memory" \
                ); \
                break; \
        } \
        case 2: \
        { \
                asm volatile("\n" \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
                        "2:\n" \
                        "\t.section .fixup, \"ax\"\n" \
                        "3:\tmov %3, %0\n" \
                        "\tjmp 2b\n" \
                        "\t.previous\n" \
                        _ASM_EXTABLE_UA(1b, 3b) \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
                        : "i" (-EFAULT), "r" (__new), "1" (__old) \
                        : "memory" \
                ); \
                break; \
        } \
        case 4: \
        { \
                asm volatile("\n" \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
                        "2:\n" \
                        "\t.section .fixup, \"ax\"\n" \
                        "3:\tmov %3, %0\n" \
                        "\tjmp 2b\n" \
                        "\t.previous\n" \
                        _ASM_EXTABLE_UA(1b, 3b) \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
                        : "i" (-EFAULT), "r" (__new), "1" (__old) \
                        : "memory" \
                ); \
                break; \
        } \
        case 8: \
        { \
                if (!IS_ENABLED(CONFIG_X86_64)) \
                        __cmpxchg_wrong_size(); \
 \
                asm volatile("\n" \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
                        "2:\n" \
                        "\t.section .fixup, \"ax\"\n" \
                        "3:\tmov %3, %0\n" \
                        "\tjmp 2b\n" \
                        "\t.previous\n" \
                        _ASM_EXTABLE_UA(1b, 3b) \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
                        : "i" (-EFAULT), "r" (__new), "1" (__old) \
                        : "memory" \
                ); \
                break; \
        } \
        default: \
                __cmpxchg_wrong_size(); \
        } \
        __uaccess_end(); \
        *__uval = __old; \
        __ret; \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
({ \
        access_ok((ptr), sizeof(*(ptr))) ? \
                __user_atomic_cmpxchg_inatomic((uval), (ptr), \
                                (old), (new), sizeof(*(ptr))) : \
                -EFAULT; \
})

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr,len)))
                return 0;
        __uaccess_begin();
        return 1;
}
#define user_access_begin(a,b)  user_access_begin(a,b)
#define user_access_end()       __uaccess_end()

#define unsafe_put_user(x, ptr, label) \
        __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label) \
do { \
        int __gu_err; \
        __inttype(*(ptr)) __gu_val; \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
        (x) = (__force __typeof__(*(ptr)))__gu_val; \
        if (unlikely(__gu_err)) goto err_label; \
} while (0)
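
/*
 * Example (illustrative only; names are made up): bracket the unsafe
 * accessors with user_access_begin()/user_access_end(); a faulting
 * access jumps to the supplied error label, which must also end the
 * user access section.
 *
 *	static int example_fill_two(int __user *uptr, int a, int b)
 *	{
 *	        if (!user_access_begin(uptr, 2 * sizeof(int)))
 *	                return -EFAULT;
 *	        unsafe_put_user(a, uptr, efault);
 *	        unsafe_put_user(b, uptr + 1, efault);
 *	        user_access_end();
 *	        return 0;
 *	efault:
 *	        user_access_end();
 *	        return -EFAULT;
 *	}
 */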

#endif /* _ASM_X86_UACCESS_H */