/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()		(current->thread.addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
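
/*
 * Illustrative sketch (not part of the original header): access_ok() only
 * range-checks the pointer; any later access can still fault. 'ubuf' and
 * 'len' are hypothetical names.
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	... ubuf is now range-checked, but each access may still
 *	    return -EFAULT and must be checked individually ...
 */
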
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
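
/*
 * Illustrative sketch (not part of the original header): typical get_user()
 * usage from a syscall. 'uaddr' is a hypothetical __user pointer argument;
 * the range check and the copy happen in one call, so no separate
 * access_ok() is needed.
 *
 *	SYSCALL_DEFINE1(example, int __user *, uaddr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uaddr))
 *			return -EFAULT;
 *		return val;
 *	}
 */
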
#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
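
/*
 * Illustrative sketch (not part of the original header): typical put_user()
 * usage. 'kernel_val' and 'uaddr' are hypothetical names; a non-zero return
 * means the write faulted and -EFAULT should be propagated to the caller.
 *
 *	if (put_user(kernel_val, uaddr))
 *		return -EFAULT;
 */
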
#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %[efault],%[errout]\n"		\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       [efault] "i" (-EFAULT), "0" (retval));		\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q", "=r")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, ltype)			\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %[efault],%[errout]\n"		\
		     "	xor"itype" %[output],%[output]\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : [errout] "=r" (err),				\
		       [output] ltype(x)				\
		     : [umem] "m" (__m(addr)),				\
		       [efault] "i" (-EFAULT), "0" (err))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(ptr) __pu_ptr = (ptr);				\
	__typeof__(size) __pu_size = (size);				\
	__uaccess_begin();						\
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__typeof__(ptr) __gu_ptr = (ptr);				\
	__typeof__(size) __gu_size = (size);				\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
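
/*
 * Illustrative sketch (not part of the original header): the intended
 * __get_user()/__put_user() pattern - one access_ok() check up front,
 * then multiple unchecked accesses to the same user buffer. The struct
 * layout and the 'uarg' pointer are hypothetical.
 *
 *	struct example_args __user *uarg = ...;
 *	int a, b;
 *
 *	if (!access_ok(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(a, &uarg->a) || __get_user(b, &uarg->b))
 *		return -EFAULT;
 *	if (__put_user(a + b, &uarg->sum))
 *		return -EFAULT;
 */
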
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (unlikely(__gu_err)) goto err_label;				\
} while (0)

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
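
/*
 * Illustrative sketch (not part of the original header): the required
 * pattern for the unsafe accessors - user_access_begin() performs the
 * access_ok() check and opens the SMAP window, faults branch to the
 * local error label, and user_access_end() must run on every exit path.
 * 'uaddr' and 'val' are hypothetical names.
 *
 *	if (!user_access_begin(uaddr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(val, &uaddr[0], efault);
 *	unsafe_put_user(val + 1, &uaddr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
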
#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)

#endif /* _ASM_X86_UACCESS_H */