/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#define user_addr_max() (current->thread.addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size,
				      unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
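 *
 * A minimal sketch of a typical caller (the pointer name "uptr" is
 * illustrative only, not something defined in this header):
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;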
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that check must have been done previously with a
 * separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
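 *
 * As a concrete illustration (a sketch of the usual expansion, not a
 * guarantee): for a 4-byte load, get_user() becomes roughly
 * "call __get_user_4" with the user address in %eax/%rax, the error
 * code coming back in %eax and the value in %edx, as picked by the
 * constraints below.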
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
			  "1:	movl %%eax,0(%1)\n"		\
			  "2:	movl %%edx,4(%1)\n"		\
			  _ASM_EXTABLE_UA(1b, %l2)		\
			  _ASM_EXTABLE_UA(2b, %l2)		\
			  : : "A" (x), "r" (addr)		\
			  : : label)

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  clobbers %ebx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
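 *
 * A minimal usage sketch (the pointer name "uptr" is illustrative only):
 *
 *	if (put_user(42, uptr))
 *		return -EFAULT;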
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %[efault],%[errout]\n"		\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       [efault] "i" (-EFAULT), "0" (retval));		\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q", "=r")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b", "=q");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, ltype)			\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %[efault],%[errout]\n"		\
		     "	xor"itype" %[output],%[output]\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : [errout] "=r" (err),				\
		       [output] ltype(x)				\
		     : [umem] "m" (__m(addr)),				\
		       [efault] "i" (-EFAULT), "0" (err))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(ptr) __pu_ptr = (ptr);				\
	__typeof__(size) __pu_size = (size);				\
	__uaccess_begin();						\
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__typeof__(ptr) __gu_ptr = (ptr);				\
	__typeof__(size) __gu_size = (size);				\
	__uaccess_begin_nospec();					\
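	/* user access enabled (stac) and spectre barrier done; fetch */\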
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
			  "1:	mov"itype" %0,%1\n"			\
			  _ASM_EXTABLE_UA(1b, %l2)			\
			  : : ltype(x), "m" (__m(addr))			\
			  : : label)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
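 *
 * A minimal sketch of the expected pattern (the names "uptr" and "val"
 * are illustrative only):
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__put_user(val, uptr))
 *		return -EFAULT;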
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: not only do you have to do the access_ok()
 * check before using them, you also have to surround them with the
 * user_access_begin()/user_access_end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (unlikely(__gu_err)) goto err_label;				\
} while (0)

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)

#endif /* _ASM_X86_UACCESS_H */