/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
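
/*
 * The three variants above are implemented in assembly (see
 * arch/x86/lib/copy_user_64.S); each returns the number of bytes that
 * could not be copied, so 0 means complete success.
 */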

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
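
/*
 * Note: alternative_call_2() emits a call to copy_user_generic_unrolled
 * and records the two replacement targets; at boot, apply_alternatives()
 * rewrites the call site in place to the best variant the CPU supports,
 * so the selection costs nothing at run time.
 */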

static __always_inline __must_check unsigned long
copy_to_user_mcsafe(void *to, const void *from, unsigned len)
{
	unsigned long ret;

	__uaccess_begin();
	ret = memcpy_mcsafe(to, from, len);
	__uaccess_end();
	return ret;
}
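
/*
 * memcpy_mcsafe() tolerates machine-check exceptions (poisoned memory)
 * on the source side; like the other copy routines here, it returns the
 * number of bytes left uncopied, with 0 meaning success.
 */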

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
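
/*
 * Usage sketch (not part of this header, and simplified): the generic
 * copy_from_user() in include/linux/uaccess.h boils down to an
 * access_ok() range check followed by this raw copy, roughly:
 *
 *	unsigned long copy_from_user(void *to, const void __user *from,
 *				     unsigned long n)
 *	{
 *		if (likely(access_ok(VERIFY_READ, from, n)))
 *			n = raw_copy_from_user(to, from, n);
 *		else
 *			memset(to, 0, n);
 *		return n;
 *	}
 */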

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
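			/*
			 * Empty asm with a "memory" clobber: a pure
			 * compiler barrier, keeping the compiler from
			 * reordering or merging the two user stores.
			 */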
			asm("":::"memory");
			__put_user_asm(((u16 *)src)[4], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(((u64 *)src)[1], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
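
/*
 * raw_copy_to_user() mirrors raw_copy_from_user(): constant sizes of
 * 1, 2, 4 and 8 bytes (plus the 10- and 16-byte compounds, done as an
 * 8-byte store plus a 2- or 8-byte tail) are open-coded with
 * __put_user_asm(); everything else falls through to
 * copy_user_generic().
 */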

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}
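
/*
 * User-to-user copy: both pointers are user addresses (the callers
 * have already done access_ok() on each side), so the generic copy,
 * whose fixups handle faults on either operand, is used directly.
 */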

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);
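
/*
 * The nocache variant copies with non-temporal stores so the data does
 * not pollute the CPU caches; the flushcache variant additionally makes
 * sure the destination is flushed out of the cache hierarchy, which
 * matters for persistent memory. Both bypass the instrumented memcpy
 * paths, hence the explicit kasan_check_write() calls in the wrappers
 * below.
 */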

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);
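
/*
 * Fault-recovery helpers: the exception fixups in the assembly copy
 * routines land here to copy whatever remains one byte at a time and
 * report how many bytes were ultimately left uncopied.
 */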

#endif /* _ASM_X86_UACCESS_64_H */