/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

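/*
 * copy_user_generic() picks one of the three implementations above at
 * boot time via the alternatives mechanism.  All three share one register
 * contract, which is what the asm constraints below encode: the
 * destination arrives in %rdi, the source in %rsi and the length in %rdx
 * (the first three x86-64 C argument registers, i.e. a normal function
 * call), and the number of bytes left uncopied comes back in %eax.  The
 * remaining call-clobbered registers (%rcx, %r8-%r11) are listed as
 * clobbers because the patched-in target is a real call.
 */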
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
        unsigned ret;

        /*
         * If the CPU has the ERMS feature, use copy_user_enhanced_fast_string.
         * Otherwise, if the CPU has the rep_good feature, use
         * copy_user_generic_string.  Otherwise, use copy_user_generic_unrolled.
         */
        alternative_call_2(copy_user_generic_unrolled,
                           copy_user_generic_string, X86_FEATURE_REP_GOOD,
                           copy_user_enhanced_fast_string, X86_FEATURE_ERMS,
                           ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                                       "=d" (len)),
                           "1" (to), "2" (from), "3" (len)
                           : "memory", "rcx", "r8", "r9", "r10", "r11");
        return ret;
}

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
        return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                                   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_flushcache(dst, src, size);
}

/*
 * Zero Userspace.
 */

__must_check unsigned long
clear_user_original(void __user *addr, unsigned long len);
__must_check unsigned long
clear_user_rep_good(void __user *addr, unsigned long len);
__must_check unsigned long
clear_user_erms(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
        might_fault();
        stac();

        /*
         * No memory constraint: the asm doesn't change any memory that
         * gcc knows about.
         *
         * The fastest variant is patched in at boot: CPUs with FSRM keep
         * the inline "rep stosb"; otherwise ALT_NOT() replaces it with a
         * call to clear_user_erms(), clear_user_rep_good() or
         * clear_user_original(), matching the best feature the CPU does
         * have.
         */
        asm volatile("1:\n\t"
                     ALTERNATIVE_3("rep stosb",
                                   "call clear_user_erms",     ALT_NOT(X86_FEATURE_FSRM),
                                   "call clear_user_rep_good", ALT_NOT(X86_FEATURE_ERMS),
                                   "call clear_user_original", ALT_NOT(X86_FEATURE_REP_GOOD))
                     "2:\n"
                     _ASM_EXTABLE_UA(1b, 2b)
                     : "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
                     : "a" (0)
                     /* rep_good clobbers %rdx */
                     : "rdx");

        clac();

        return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
        if (access_ok(to, n))
                return __clear_user(to, n);
        return n;
}
#endif /* _ASM_X86_UACCESS_64_H */
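
/*
 * Usage sketch (illustrative only, not part of this header): callers treat
 * a nonzero return value from clear_user() as the number of bytes that
 * could not be zeroed and conventionally fail with -EFAULT, e.g.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */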