/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/instrumented.h>
#include <linux/kasan-checks.h>
#include <linux/mm_types.h>
#include <linux/string.h>
#include <linux/mmap_lock.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 *
 * Magic with the 'sign' allows untagging a userspace pointer without any
 * branches while leaving kernel addresses intact.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	long sign;

	/*
	 * Refer to tlbstate_untag_mask directly to avoid a RIP-relative
	 * relocation in the alternative instructions: the relocation would
	 * be wrong once the code is copied to its target location.
	 */
	asm (ALTERNATIVE("",
			 "sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
			 "or %%gs:tlbstate_untag_mask, %[sign]\n\t"
			 "and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
	     : [addr] "+r" (addr), [sign] "=r" (sign)
	     : "m" (tlbstate_untag_mask), "[sign]" (addr));

	return addr;
}

#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	long sign = addr >> 63;

	mmap_assert_locked(mm);
	addr &= (mm)->context.untag_mask | sign;

	return addr;
}

#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})
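
/*
 * For illustration: the untagging above relies on bit 63 distinguishing
 * user from kernel pointers.
 *
 *   user pointer   (bit 63 == 0): "sar $63" yields 0;  0 | untag_mask == untag_mask,
 *                                 so the "and" clears only the tag bits;
 *   kernel pointer (bit 63 == 1): "sar $63" yields -1; -1 | untag_mask == -1,
 *                                 so the "and" leaves the address unchanged.
 *
 * __untagged_addr_remote() above is the C equivalent, with "addr >> 63"
 * providing the sign word.
 */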

#else
#define untagged_addr(addr)	(addr)
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(__access_ok(untagged_addr(addr), size));			\
})

#include <asm-generic/access_ok.h>

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	instrument_get_user(__val_gu);					\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

/**
 * get_user - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
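
/*
 * Example use (illustrative only; the function and its argument are
 * hypothetical, not part of this header):
 *
 *	long example_read_flag(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return val != 0;
 *	}
 *
 * get_user() performs the access_ok() check itself; __get_user() below
 * does not, and relies on the caller having done it.
 */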

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)


#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1: movl %%eax,0(%1)\n"			\
		     "2: movl %%edx,4(%1)\n"			\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	__ptr_pu = __ptr;						\
	__val_pu = __x;							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	instrument_put_user(__x, __ptr, sizeof(*(ptr)));		\
	__builtin_expect(__ret_pu, 0);					\
})

/**
 * put_user - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)
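
/*
 * The __get_user()/__put_user() variants are typically used when a single
 * access_ok() check covers several accesses (illustrative only; the function
 * and its arguments are hypothetical):
 *
 *	int example_sum_words(const u32 __user *uptr, int n, u32 *sum)
 *	{
 *		u32 val;
 *		int i;
 *
 *		if (!access_ok(uptr, sizeof(u32) * n))
 *			return -EFAULT;
 *		*sum = 0;
 *		for (i = 0; i < n; i++) {
 *			if (__get_user(val, uptr + i))
 *				return -EFAULT;
 *			*sum += val;
 *		}
 *		return 0;
 *	}
 */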

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(__x, __ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(__x, __ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(__x, __ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(__x, __ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
	instrument_put_user(__x, __ptr, size);				\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
	instrument_get_user(x);						\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1: mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1: movl %[lowbits],%%eax\n"			\
		     "2: movl %[highbits],%%edx\n"			\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "0" (retval));					\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w");			\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l");			\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype)				\
	asm volatile("\n"						\
		     "1: mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX,		\
					   %[errout])			\
		     : [errout] "=r" (err),				\
		       [output] "=a" (x)				\
		     : [umem] "m" (__m(addr)),				\
		       "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
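
/*
 * Roughly, with asm goto output support a 4-byte
 * __get_user_size(val, uptr, 4, efault) expands to (simplified):
 *
 *	1:	movl (%[uptr]), %[val]
 *	// plus an exception table entry meaning "if 1: faults, jump to efault"
 *
 * so the success path carries no conditional branch; a faulting access is
 * redirected to the error label by the exception table fixup.
 */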

#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})
#endif // CONFIG_X86_32
#else  // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	int __err = 0;							\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     CC_SET(z)						\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[errout])			\
		     : CC_OUT(z) (success),				\
		       [errout] "+r" (__err),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory");					\
	if (unlikely(__err))						\
		goto label;						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, use output GPR for both success/fail and error.
 * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
 * hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses
 * both ESI and EDI for the memory operand, compilation will fail if the error
 * is an input+output as there will be no register available for input.
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	int __result;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     "mov $0, %[result]\n\t"				\
		     "setz %b[result]\n"				\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[result])			\
		     : [result] "=q" (__result),			\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory", "cc");					\
	if (unlikely(__result < 0))					\
		goto label;						\
	if (unlikely(!__result))					\
		*_old = __old;						\
	likely(__result);					})
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1: mov"itype" %0,%1\n"					\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (unlikely(__gu_err)) goto err_label;				\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
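
/*
 * Typical use of the unsafe accessors (illustrative only; the function and
 * its arguments are hypothetical):
 *
 *	int example_fill_pair(u32 __user *uptr, u32 a, u32 b)
 *	{
 *		if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		unsafe_put_user(a, uptr, Efault);
 *		unsafe_put_user(b, uptr + 1, Efault);
 *		user_access_end();
 *		return 0;
 *	Efault:
 *		user_access_end();
 *		return -EFAULT;
 *	}
 *
 * user_access_begin() performs the access_ok() check and opens the SMAP
 * (STAC/CLAC) window; every exit path, including the fault label, must
 * call user_access_end().
 */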

extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
 * Force the pointer to u<size> to match the size expected by the asm helper.
 * clang/LLVM compiles all cases and only discards the unused paths after
 * processing errors, which breaks i386 if the pointer is an 8-byte value.
 */
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
	bool __ret;								\
	__chk_user_ptr(_ptr);							\
	switch (sizeof(*(_ptr))) {						\
	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
					       (__force u8 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
					       (__force u16 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
					       (__force u32 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
						 (_nval), _label);		\
		break;								\
	default: __try_cmpxchg_user_wrong_size();				\
	}									\
	__ret;						})

/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({		\
	int __ret = -EFAULT;						\
	__uaccess_begin_nospec();					\
	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);	\
_label:									\
	__uaccess_end();						\
	__ret;								\
})
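
/*
 * Example (illustrative; the variables are hypothetical). The label passed
 * to __try_cmpxchg_user() is defined inside the macro itself, so the caller
 * only has to supply a name that is unique within the function:
 *
 *	u32 old = expected;
 *	int ret = __try_cmpxchg_user(uptr, &old, new_val, cmpxchg_fault);
 *
 *	ret == 0:       *uptr matched 'expected' and was replaced by 'new_val';
 *	ret == 1:       the compare failed; 'old' now holds the current value;
 *	ret == -EFAULT: the user access faulted.
 */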

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)				\
	while (len >= sizeof(type)) {						\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);						\
		src += sizeof(type);						\
		len -= sizeof(type);						\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)

#endif /* _ASM_X86_UACCESS_H */