/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/instrumented.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)					\
({								\
	WARN_ON_IN_IRQ();					\
	likely(__access_ok(addr, size));			\
})

#include <asm-generic/access_ok.h>

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
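 *
 * As an illustrative sketch (not authoritative), a 4-byte
 * get_user(x, ptr) therefore boils down to roughly:
 *
 *	call __get_user_4	# ptr in %rax via the "0"/"=a" tie
 *	# on return: error code in %eax, loaded value in %edx
 *
 * with the value register picked by __inttype()/_ASM_DX as described
 * above.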
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	instrument_get_user(__val_gu);					\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)


#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
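 *
 * Illustrative hazard (a sketch, with assumed names): in
 * put_user(val, p->uptr), loading p->uptr is itself a memory access,
 * so KASAN may instrument it with a call such as __asan_load8();
 * had __val_pu already been staged in %eax, that call would be free
 * to clobber it.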
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	__ptr_pu = __ptr;						\
	__val_pu = __x;							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	instrument_put_user(__x, __ptr, sizeof(*(ptr)));		\
	__builtin_expect(__ret_pu, 0);					\
})

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
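 *
 * A minimal illustrative caller (not part of this header; uptr and
 * val are assumed names):
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__put_user(val, uptr))
 *		return -EFAULT;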
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(__x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(__x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(__x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(__x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
	instrument_put_user(__x, ptr, size);				\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
	instrument_get_user(x);						\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "0" (retval));					\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w");			\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l");			\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype)				\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX,		\
					   %[errout])			\
		     : [errout] "=r" (err),				\
		       [output] "=a" (x)				\
		     : [umem] "m" (__m(addr)),				\
		       "0" (err))
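
/*
 * Illustrative fault path for the error-code flavour above (a
 * sketch, not authoritative): if the mov at 1: faults, the exception
 * fixup lands at 2: with -EFAULT written into the %[errout] register
 * and, due to EX_FLAG_CLEAR_AX, a zeroed %ax output.  Callers follow
 * the pattern of the unsafe_get_user() fallback further down:
 *
 *	int err;
 *	unsigned long val;
 *
 *	__get_user_size(val, uptr, sizeof(*uptr), err);
 *	if (unlikely(err))
 *		goto efault;
 */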

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})
#endif // CONFIG_X86_32
#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	int __err = 0;							\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     CC_SET(z)						\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[errout])			\
		     : CC_OUT(z) (success),				\
		       [errout] "+r" (__err),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory");					\
	if (unlikely(__err))						\
		goto label;						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, use output GPR for both success/fail and error.
 * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
 * hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the compiler uses
 * both ESI and EDI for the memory operand, compilation will fail if the error
 * is an input+output as there will be no register available for input.
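 *
 * Illustrative operand layout (a sketch of that register math):
 * CMPXCHG8B compares EDX:EAX (the "+A" __old pair) with the memory
 * operand and, on match, stores ECX:EBX (the "c"/"b" halves of
 * __new), so only ESI and EDI are left over for addressing %[ptr]
 * and holding %[result].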
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	int __result;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     "mov $0, %[result]\n\t"				\
		     "setz %b[result]\n"				\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[result])			\
		     : [result] "=q" (__result),			\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory", "cc");					\
	if (unlikely(__result < 0))					\
		goto label;						\
	if (unlikely(!__result))					\
		*_old = __old;						\
	likely(__result);					})
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
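 *
 * Canonical shape of a user (an illustrative sketch, not from this
 * header; uptr, val and Efault are assumed names):
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, uptr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;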
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (unlikely(__gu_err)) goto err_label;				\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
 * Force the pointer to u<size> to match the size expected by the asm helper.
 * clang/LLVM compiles all cases and only discards the unused paths after
 * processing errors, which breaks i386 if the pointer is an 8-byte value.
 */
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
	bool __ret;								\
	__chk_user_ptr(_ptr);							\
	switch (sizeof(*(_ptr))) {						\
	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
					       (__force u8 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
					       (__force u16 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
					       (__force u32 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
						 (_nval), _label);		\
		break;								\
	default: __try_cmpxchg_user_wrong_size();				\
	}									\
	__ret;						})

/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({		\
	int __ret = -EFAULT;						\
	__uaccess_begin_nospec();					\
	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);	\
_label:									\
	__uaccess_end();						\
	__ret;								\
})

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
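 *
 * (An out-of-line function could not "goto" a label in its caller;
 * only a macro expanded in place can.  As a worked example, a 7-byte
 * unsafe_copy_to_user() below decomposes into one u32, one u16 and
 * one u8 unsafe_put_user(), each of which jumps to the caller's
 * label on fault.)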
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)

#endif /* _ASM_X86_UACCESS_H */