/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS 	MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()		(current->thread.addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})

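/*
 * Illustrative sketch (not part of this header; "ubuf" and "kval" are
 * hypothetical names): a typical caller validates the whole user buffer once
 * with access_ok() and then uses the "__"-prefixed accessors, which skip the
 * range check:
 *
 *	int __user *ubuf = ...;
 *	int kval;
 *
 *	if (!access_ok(ubuf, sizeof(*ubuf)))
 *		return -EFAULT;
 *	if (__get_user(kval, ubuf))
 *		return -EFAULT;
 */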
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

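/*
 * Illustrative sketch (hypothetical names): get_user() validates the pointer
 * itself, so no prior access_ok() call is needed:
 *
 *	int __user *uarg = ...;
 *	int val;
 *
 *	if (get_user(val, uarg))
 *		return -EFAULT;
 *	// val now holds the value read from user space; on fault it is zero
 */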
#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_asm_ex_u64(x, addr)				\
	asm volatile("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     "3:"					\
		     _ASM_EXTABLE_EX(1b, 2b)			\
		     _ASM_EXTABLE_EX(2b, 3b)			\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
({									\
	int __ret_pu;							\
	__typeof__(*(ptr)) __pu_val;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__pu_val = x;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_x(1, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 2:								\
		__put_user_x(2, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 4:								\
		__put_user_x(4, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 8:								\
		__put_user_x8(__pu_val, ptr, __ret_pu);			\
		break;							\
	default:							\
		__put_user_x(X, __pu_val, ptr, __ret_pu);		\
		break;							\
	}								\
	__builtin_expect(__ret_pu, 0);					\
})

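/*
 * Illustrative sketch (hypothetical names): put_user() checks the destination
 * pointer itself, so no prior access_ok() call is needed:
 *
 *	int __user *uresult = ...;
 *	int status = 0;
 *
 *	if (put_user(status, uresult))
 *		return -EFAULT;
 */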
#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

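/*
 * Illustrative sketch only: on a faulting access, the exception table entry
 * added by _ASM_EXTABLE_UA() redirects execution into the .fixup code, so
 * __get_user_asm() above behaves roughly like
 *
 *	err = 0;
 *	if (*addr is readable)
 *		x = *addr;
 *	else {
 *		err = errret;	// typically -EFAULT
 *		x = 0;		// a faulting get_user() zeroes the destination
 *	}
 *
 * __get_user_asm_nozero() below is the same except that it leaves @x
 * untouched on a fault.
 */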
#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "  jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(ptr) __pu_ptr = (ptr);				\
	__typeof__(size) __pu_size = (size);				\
	__uaccess_begin();						\
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)		\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %"rtype"0,%1\n"			\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x,addr,itype,rtype,ltype,__puflab);	\
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do {	\
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);	\
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

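/*
 * Illustrative sketch (hypothetical caller): __put_user_goto() and
 * __put_user_size() report a fault by jumping to a caller-supplied label
 * instead of producing an error value.  The user_access_begin() and
 * unsafe_put_user() helpers defined later in this header expose that
 * pattern to callers:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, uptr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */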
/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*(uval) = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})

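/*
 * Illustrative sketch (hypothetical names): atomically replace a user-space
 * word only if it still holds the expected value.  The macro performs its own
 * access_ok() check and returns -EFAULT on a bad pointer or fault; on success,
 * the value found at uaddr before the cmpxchg is stored through the first
 * argument:
 *
 *	u32 __user *uaddr = ...;
 *	u32 expected = 0, newval = 1, cur;
 *	int ret;
 *
 *	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, newval);
 *	if (ret)
 *		return ret;
 *	if (cur != expected)
 *		// somebody else changed the value first; retry or bail out
 */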
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)

#endif /* _ASM_X86_UACCESS_H */