xref: /openbmc/linux/arch/x86/include/asm/uaccess_64.h (revision 7dd65feb)
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/lockdep.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

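/*
 * Each of these returns the number of bytes that could NOT be copied;
 * zero means the whole request succeeded.
 */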
__must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();
	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
	else
		WARN(1, "Buffer overflow detected!\n");
#endif
	return n;
}
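
/*
 * Usage sketch (illustrative only; 'foo_ioctl', 'struct foo_args' and
 * the user pointer 'arg' are hypothetical):
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
 *		return -EFAULT;
 *
 * Any non-zero return means bytes were left uncopied; callers
 * conventionally map that to -EFAULT.
 */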

static __always_inline __must_check
int copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();

	return _copy_to_user(dst, src, size);
}
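
/*
 * Usage sketch (illustrative; 'args' and 'arg' as in the example above):
 *
 *	if (copy_to_user((void __user *)arg, &args, sizeof(args)))
 *		return -EFAULT;
 */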

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
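	/*
	 * Small constant sizes are open-coded.  The trailing constant
	 * passed to __get_user_asm() is the error return: the byte count
	 * reported as uncopied if that access faults (hence 10 for the
	 * first half of the 10-byte case, 2 for its tail).
	 */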
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
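	/*
	 * As in __copy_from_user(): the trailing constant is the byte
	 * count reported as uncopied if the store faults.
	 */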
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
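		/*
		 * Compiler barrier, so gcc cannot merge or reorder the
		 * two user stores.  Note that 4[(u16 *)src] is simply
		 * ((u16 *)src)[4]: the u16 at byte offset 8.
		 */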
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

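/*
 * Copy from one userspace buffer to another, bouncing each item through
 * a temporary held in a kernel register.
 */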
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}

	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

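/*
 * The *_inatomic variants omit the might_fault() annotation: they are
 * meant for atomic context (e.g. with pagefaults disabled), where a
 * fault must fail fast instead of sleeping.
 */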
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return copy_user_generic(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

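/*
 * __copy_user_nocache() copies with non-temporal stores so the
 * destination does not pollute the CPU cache; 'zerorest' asks it to
 * zero the uncopied remainder of 'dst' after a partial copy.
 */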
static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

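/*
 * Fix up the tail of a failed copy: retry byte by byte and, if
 * 'zerorest' is set, zero whatever remains uncopied in 'to'.
 * Returns the number of bytes still not copied.
 */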
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */