/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(current->thread.addr_limit.seg)
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})

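/*
 * Example (illustrative sketch only, not an additional API): a typical
 * caller validates a user range once with access_ok() and may then use the
 * non-checking "__"-prefixed accessors defined below on that same range.
 * The identifiers "uptr" and "val" are hypothetical, and even after a
 * successful access_ok() the access itself may still return -EFAULT:
 *
 *	u32 __user *uptr;
 *	u32 val;
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */
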
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that check must have been done
 * previously with a separate "access_ok()" call (this is used when we do
 * multiple accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

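/*
 * Example (hedged sketch, not part of this header): get_user() performs its
 * own address check, so no separate access_ok() call is needed.  The names
 * "uptr" and "flags" are made up for illustration; on success "flags" holds
 * the user-supplied value, on failure it is zero and -EFAULT is returned:
 *
 *	unsigned int __user *uptr;
 *	unsigned int flags;
 *
 *	if (get_user(flags, uptr))
 *		return -EFAULT;
 */
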
#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")


#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
({									\
	int __ret_pu;							\
	__typeof__(*(ptr)) __pu_val;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__pu_val = x;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_x(1, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 2:								\
		__put_user_x(2, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 4:								\
		__put_user_x(4, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 8:								\
		__put_user_x8(__pu_val, ptr, __ret_pu);			\
		break;							\
	default:							\
		__put_user_x(X, __pu_val, ptr, __ret_pu);		\
		break;							\
	}								\
	__builtin_expect(__ret_pu, 0);					\
})

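/*
 * Example (illustrative only): writing a single value back to user space
 * with put_user().  "status" and "uptr" are hypothetical names:
 *
 *	int __user *uptr;
 *	int status = 0;
 *
 *	if (put_user(status, uptr))
 *		return -EFAULT;
 */
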
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

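/*
 * Usage note (sketch only, not an additional API): the __{get,put}_user_size()
 * dispatch helpers above do not open or close the user-access window
 * themselves; a caller brackets them with __uaccess_begin{_nospec}() and
 * __uaccess_end(), which is what __get_user_nocheck()/__put_user_nocheck()
 * below do.  With a hypothetical "uptr":
 *
 *	int err;
 *	u16 val;
 *
 *	__uaccess_begin_nospec();
 *	__get_user_size(val, (u16 __user *)uptr, 2, err, -EFAULT);
 *	__uaccess_end();
 */
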
#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "  jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	int __pu_err;							\
	__uaccess_begin();						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

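/*
 * Sketch (illustrative only) of how the try/catch helpers above are meant
 * to be used; the {get|put}_user_try wrappers documented further down
 * follow the same shape:
 *
 *	int err = 0;
 *
 *	uaccess_try {
 *		... exception-handled accesses such as put_user_ex() ...
 *	} uaccess_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */
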
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

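/*
 * Example (sketch only): the put side of the try/catch pattern, mirroring
 * the get_user_try comment above.  "frame" is a hypothetical __user pointer
 * and "regs" a hypothetical pt_regs pointer:
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(regs->ip, &frame->ip);
 *	} put_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */
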
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__typeof__(*(ptr)) __pu_val = (x);					\
	__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)

#endif /* _ASM_X86_UACCESS_H */